From c8e5462254c6f33520308eb8ce2b4b9d8061f23c Mon Sep 17 00:00:00 2001 From: ananas-block <58553958+ananas-block@users.noreply.github.com> Date: Tue, 10 Dec 2024 01:46:03 +0000 Subject: [PATCH] feat: batch address mt (#1365) * fix: stop zeroing out commitments proven by inclusion * feat: batch address tree feat: forester clears bloom filter feat: insert zkp batch feat: read only address * test: add tests and format * add readonly account tests * start refactor read only accounts * feat: read only accounts * fix: proof by index check in system program * refactor: tx hash --- Cargo.lock | 15 + Cargo.toml | 1 + .../src/batch_address_append.rs | 492 ++++++-- .../src/mock_batched_forester.rs | 148 +++ .../light-prover-client/tests/gnark.rs | 26 +- circuit-lib/verifier/src/lib.rs | 37 + .../programs/name-service/tests/test.rs | 5 +- .../src/escrow_with_compressed_pda/escrow.rs | 4 +- .../token-escrow/tests/test_compressed_pda.rs | 8 +- forester-utils/src/indexer/mod.rs | 35 +- forester/src/epoch_manager.rs | 2 +- forester/src/photon_indexer.rs | 27 +- forester/src/rollover/operations.rs | 2 +- forester/tests/test_utils.rs | 4 +- light-program-test/src/test_env.rs | 48 +- .../prover/batch_address_append_circuit.go | 1 + .../prover/marshal_batch_address_append.go | 3 +- merkle-tree/indexed/src/array.rs | 11 +- merkle-tree/indexed/src/lib.rs | 2 + merkle-tree/indexed/src/reference.rs | 6 +- merkle-tree/reference/src/lib.rs | 4 + programs/account-compression/src/errors.rs | 2 + .../src/instructions/append_leaves.rs | 6 +- .../src/instructions/batch_append.rs | 5 +- .../src/instructions/batch_nullify.rs | 5 +- .../instructions/batch_update_address_tree.rs | 46 + .../src/instructions/insert_into_queues.rs | 96 +- .../intialize_batch_address_merkle_tree.rs | 394 +++++++ .../intialize_batched_state_merkle_tree.rs | 82 +- .../src/instructions/mod.rs | 9 + .../rollover_batch_address_merkle_tree.rs | 484 ++++++++ .../rollover_batch_state_merkle_tree.rs | 22 +- 
programs/account-compression/src/lib.rs | 32 +- .../account-compression/src/state/batch.rs | 101 +- .../src/state/batched_merkle_tree.rs | 1034 ++++++++++++++--- .../src/state/batched_queue.rs | 96 +- .../src/utils/constants.rs | 6 + .../batch_update_address_tree.rs | 49 + .../initialize_batched_address_tree.rs | 43 + .../src/account_compression_cpi/mod.rs | 3 + .../rollover_batch_address_tree.rs | 54 + .../src/account_compression_cpi/sdk.rs | 86 +- programs/registry/src/lib.rs | 68 +- programs/system/src/errors.rs | 2 + programs/system/src/invoke/address.rs | 57 +- programs/system/src/invoke/emit_event.rs | 12 +- programs/system/src/invoke/instruction.rs | 14 + programs/system/src/invoke/nullify_state.rs | 4 - programs/system/src/invoke/processor.rs | 81 +- programs/system/src/invoke/sum_check.rs | 55 +- .../system/src/invoke/verify_state_proof.rs | 334 ++++-- programs/system/src/invoke_cpi/instruction.rs | 13 +- programs/system/src/invoke_cpi/processor.rs | 10 +- .../system/src/invoke_cpi/verify_signer.rs | 62 +- programs/system/src/lib.rs | 30 +- programs/system/src/sdk/address.rs | 78 +- programs/system/src/sdk/compressed_account.rs | 123 +- programs/system/src/sdk/invoke.rs | 1 + .../account-compression-test/Cargo.toml | 3 +- .../tests/address_merkle_tree_tests.rs | 1 + .../tests/batched_merkle_tree_test.rs | 746 +++++++++++- .../compressed-token-test/tests/test.rs | 14 +- .../create-address-test-program/Cargo.toml | 27 + .../create-address-test-program/Xargo.toml | 2 + .../src/create_pda.rs | 160 +++ .../create-address-test-program/src/lib.rs | 27 + test-programs/e2e-test/tests/test.rs | 6 +- test-programs/registry-test/tests/tests.rs | 306 ++++- test-programs/system-cpi-test/Cargo.toml | 2 + .../system-cpi-test/src/create_pda.rs | 323 ++++- test-programs/system-cpi-test/src/lib.rs | 10 +- test-programs/system-cpi-test/src/sdk.rs | 48 +- test-programs/system-cpi-test/tests/test.rs | 945 ++++++++++++++- test-programs/system-test/tests/test.rs | 256 +++- 
test-utils/Cargo.toml | 1 + test-utils/src/address.rs | 50 + test-utils/src/assert_compressed_tx.rs | 14 +- .../src/create_address_test_program_sdk.rs | 147 +++ test-utils/src/e2e_test_env.rs | 127 +- test-utils/src/indexer/test_indexer.rs | 332 ++++-- test-utils/src/lib.rs | 4 +- test-utils/src/system_program.rs | 4 +- test-utils/src/test_batch_forester.rs | 284 ++++- test-utils/src/test_forester.rs | 42 - 84 files changed, 7498 insertions(+), 863 deletions(-) create mode 100644 programs/account-compression/src/instructions/batch_update_address_tree.rs create mode 100644 programs/account-compression/src/instructions/intialize_batch_address_merkle_tree.rs create mode 100644 programs/account-compression/src/instructions/rollover_batch_address_merkle_tree.rs create mode 100644 programs/registry/src/account_compression_cpi/batch_update_address_tree.rs create mode 100644 programs/registry/src/account_compression_cpi/initialize_batched_address_tree.rs create mode 100644 programs/registry/src/account_compression_cpi/rollover_batch_address_tree.rs create mode 100644 test-programs/create-address-test-program/Cargo.toml create mode 100644 test-programs/create-address-test-program/Xargo.toml create mode 100644 test-programs/create-address-test-program/src/create_pda.rs create mode 100644 test-programs/create-address-test-program/src/lib.rs create mode 100644 test-utils/src/address.rs create mode 100644 test-utils/src/create_address_test_program_sdk.rs diff --git a/Cargo.lock b/Cargo.lock index dcde285844..4199750f89 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -48,6 +48,7 @@ dependencies = [ "anchor-spl", "ark-bn254", "ark-ff 0.4.2", + "light-bloom-filter", "light-bounded-vec", "light-compressed-token", "light-concurrent-merkle-tree", @@ -1794,6 +1795,17 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "create-address-test-program" +version = "1.0.0" +dependencies = [ + "account-compression", + "anchor-lang", + "light-hasher", + "light-system-program", + 
"light-utils", +] + [[package]] name = "crossbeam-channel" version = "0.5.13" @@ -4055,6 +4067,7 @@ dependencies = [ "anyhow", "ark-ff 0.4.2", "async-trait", + "create-address-test-program", "forester-utils", "light-client", "light-compressed-token", @@ -8534,9 +8547,11 @@ dependencies = [ "light-system-program", "light-test-utils", "light-utils", + "light-verifier", "num-bigint 0.4.6", "num-traits", "reqwest 0.11.27", + "serial_test", "solana-program-test", "solana-sdk", "spl-token", diff --git a/Cargo.toml b/Cargo.toml index 4a8b923551..f658db4ed9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,6 +27,7 @@ members = [ "test-programs/system-cpi-test/", "test-programs/system-test/", "test-programs/sdk-test-program/programs/sdk-test/", + "test-programs/create-address-test-program/", "forester-utils", "forester", "photon-api", diff --git a/circuit-lib/light-prover-client/src/batch_address_append.rs b/circuit-lib/light-prover-client/src/batch_address_append.rs index 5b0d314b18..31d4ca8693 100644 --- a/circuit-lib/light-prover-client/src/batch_address_append.rs +++ b/circuit-lib/light-prover-client/src/batch_address_append.rs @@ -1,4 +1,5 @@ use crate::helpers::{compute_root_from_merkle_proof, hash_chain}; + use light_bounded_vec::BoundedVec; use light_concurrent_merkle_tree::changelog::ChangelogEntry; use light_concurrent_merkle_tree::event::RawIndexedElement; @@ -30,7 +31,8 @@ pub struct BatchAddressAppendInputs { } #[allow(clippy::too_many_arguments)] -pub fn get_batch_address_append_inputs_from_tree( +pub fn get_batch_address_append_circuit_inputs( + // Onchain account merkle tree next index. next_index: usize, current_root: [u8; 32], low_element_values: Vec<[u8; 32]>, @@ -40,33 +42,59 @@ pub fn get_batch_address_append_inputs_from_tree( low_element_proofs: Vec>, new_element_values: Vec<[u8; 32]>, subtrees: [[u8; 32]; HEIGHT], + leaves_hashchain: [u8; 32], + // Merkle tree index at batch index 0. 
(Indexer next index) + batch_start_index: usize, + zkp_batch_size: usize, ) -> BatchAddressAppendInputs { + // 1. input all elements of a batch. + // 2. iterate over elements 0..end_index + // 3. only use elements start_index..end_index in the circuit (we need to + // iterate over elements prior to start index to create changelog entries to + // patch subsequent element proofs. The indexer won't be caught up yet.) + let inserted_elements = next_index - batch_start_index; + let end_index = inserted_elements + zkp_batch_size; + println!("next_index: {}", next_index); + println!("batch_start_index: {}", batch_start_index); + println!("inserted elements: {}", inserted_elements); + println!("end index {}", end_index); + let new_element_values = new_element_values[0..end_index].to_vec(); let mut new_root = [0u8; 32]; let mut low_element_circuit_merkle_proofs = vec![]; let mut new_element_circuit_merkle_proofs = vec![]; let mut changelog: Vec> = Vec::new(); - let mut indexed_changelog: Vec> = Vec::new(); + let mut indexed_changelog: Vec> = Vec::new(); let mut patched_low_element_next_values: Vec<[u8; 32]> = Vec::new(); let mut patched_low_element_next_indices: Vec = Vec::new(); - - let mut merkle_tree = SparseMerkleTree::::new(subtrees, next_index); - - for i in 0..low_element_values.len() { + let mut patched_low_element_values: Vec<[u8; 32]> = Vec::new(); + let mut patched_low_element_indices: Vec = Vec::new(); + let mut merkle_tree = SparseMerkleTree::::new(subtrees, batch_start_index); + + // TODO: remove after first iter works + let mut man_indexed_array = IndexedArray::::default(); + man_indexed_array.init().unwrap(); + let mut indexed_array = IndexedArray::::default(); + indexed_array.init().unwrap(); + let mut indexed_merkle_tree = IndexedMerkleTree::::new(HEIGHT, 0).unwrap(); + indexed_merkle_tree.init().unwrap(); + for i in 0..new_element_values.len() { + println!("get_batch_address_append_circuit_inputs i: {}", i); let mut changelog_index = 0; + 
println!("changelog_index first: {}", changelog_index); - let new_element_index = next_index + i; - let mut low_element: IndexedElement = IndexedElement { - index: low_element_indices[i] as u16, + let new_element_index = batch_start_index + i; + let mut low_element: IndexedElement = IndexedElement { + index: low_element_indices[i], value: BigUint::from_bytes_be(&low_element_values[i]), - next_index: low_element_next_indices[i] as u16, + next_index: low_element_next_indices[i], }; - let mut new_element: IndexedElement = IndexedElement { - index: new_element_index as u16, + let mut new_element: IndexedElement = IndexedElement { + index: new_element_index, value: BigUint::from_bytes_be(&new_element_values[i]), - next_index: low_element_next_indices[i] as u16, + next_index: low_element_next_indices[i], }; let mut low_element_proof: BoundedVec<[u8; 32]> = @@ -85,14 +113,18 @@ pub fn get_batch_address_append_inputs_from_tree( ) .unwrap(); } - patched_low_element_next_values - .push(bigint_to_be_bytes_array::<32>(&low_element_next_value).unwrap()); - patched_low_element_next_indices.push(low_element.next_index as usize); - - let new_low_element: IndexedElement = IndexedElement { + if i >= inserted_elements { + patched_low_element_next_values + .push(bigint_to_be_bytes_array::<32>(&low_element_next_value).unwrap()); + patched_low_element_next_indices.push(low_element.next_index()); + patched_low_element_indices.push(low_element.index); + patched_low_element_values + .push(bigint_to_be_bytes_array::<32>(&low_element.value).unwrap()); + } + let new_low_element: IndexedElement = IndexedElement { index: low_element.index, - value: low_element.value, - next_index: new_element_index as u16, + value: low_element.value.clone(), + next_index: new_element.index, }; let new_low_element_raw = RawIndexedElement { value: bigint_to_be_bytes_array::<32>(&new_low_element.value).unwrap(), @@ -101,19 +133,12 @@ pub fn get_batch_address_append_inputs_from_tree( index: new_low_element.index, 
}; - let low_element_changelog_entry = IndexedChangelogEntry { - element: new_low_element_raw, - proof: low_element_proof.as_slice()[..HEIGHT].try_into().unwrap(), - changelog_index, - }; - - indexed_changelog.push(low_element_changelog_entry); - { if i > 0 { - for change_log_entry in changelog.iter().skip(changelog_index + 1) { + println!("changelog_index second: {}", changelog_index); + for change_log_entry in changelog.iter().skip(changelog_index) { change_log_entry - .update_proof(low_element_indices[i], &mut low_element_proof) + .update_proof(low_element.index(), &mut low_element_proof) .unwrap(); } } @@ -121,27 +146,40 @@ pub fn get_batch_address_append_inputs_from_tree( let new_low_leaf_hash = new_low_element .hash::(&new_element.value) .unwrap(); - let (_, changelog_entry) = compute_root_from_merkle_proof( + println!("new_low_leaf_hash: {:?}", new_low_leaf_hash); + let (_updated_root, changelog_entry) = compute_root_from_merkle_proof( new_low_leaf_hash, &merkle_proof, new_low_element.index as u32, ); + println!("new_low_leaf_hash updated_root: {:?}", _updated_root); changelog.push(changelog_entry); - low_element_circuit_merkle_proofs.push( - merkle_proof - .iter() - .map(|hash| BigUint::from_bytes_be(hash)) - .collect(), - ); + if i >= inserted_elements { + low_element_circuit_merkle_proofs.push( + merkle_proof + .iter() + .map(|hash| BigUint::from_bytes_be(hash)) + .collect(), + ); + } } + let low_element_changelog_entry = IndexedChangelogEntry { + element: new_low_element_raw, + proof: low_element_proof.as_slice()[..HEIGHT].try_into().unwrap(), + changelog_index: indexed_changelog.len(), //changelog.len(), //change_log_index, + }; + indexed_changelog.push(low_element_changelog_entry); { - let new_element_value = low_element_next_value; - let new_element_leaf_hash = new_element.hash::(&new_element_value).unwrap(); + let new_element_next_value = low_element_next_value; + let new_element_leaf_hash = new_element + .hash::(&new_element_next_value) + .unwrap(); 
+ println!("new_element_leaf_hash: {:?}", new_element_leaf_hash); let proof = merkle_tree.append(new_element_leaf_hash); let mut bounded_vec_merkle_proof = BoundedVec::from_slice(proof.as_slice()); - let current_index = next_index + i; + let current_index = batch_start_index + i; for change_log_entry in changelog.iter() { change_log_entry @@ -149,11 +187,8 @@ pub fn get_batch_address_append_inputs_from_tree( .unwrap(); } - let reference_root = compute_root_from_merkle_proof( - new_element_leaf_hash, - &proof, - (next_index + i) as u32, - ); + let reference_root = + compute_root_from_merkle_proof(new_element_leaf_hash, &proof, current_index as u32); assert_eq!(merkle_tree.root(), reference_root.0); let merkle_proof_array = bounded_vec_merkle_proof.to_array().unwrap(); @@ -161,51 +196,63 @@ pub fn get_batch_address_append_inputs_from_tree( let (updated_root, changelog_entry) = compute_root_from_merkle_proof( new_element_leaf_hash, &merkle_proof_array, - (next_index + i) as u32, + current_index as u32, ); new_root = updated_root; + println!("new_root: {:?}", new_root); changelog.push(changelog_entry); - new_element_circuit_merkle_proofs.push( - merkle_proof_array - .iter() - .map(|hash| BigUint::from_bytes_be(hash)) - .collect(), - ); - + if i >= inserted_elements { + new_element_circuit_merkle_proofs.push( + merkle_proof_array + .iter() + .map(|hash| BigUint::from_bytes_be(hash)) + .collect(), + ); + } let new_element_raw = RawIndexedElement { value: bigint_to_be_bytes_array::<32>(&new_element.value).unwrap(), next_index: new_element.next_index, - next_value: bigint_to_be_bytes_array::<32>(&new_low_element.value).unwrap(), + next_value: bigint_to_be_bytes_array::<32>(&new_element_next_value).unwrap(), index: new_element.index, }; let new_element_changelog_entry = IndexedChangelogEntry { element: new_element_raw, - proof: proof.as_slice()[..HEIGHT].try_into().unwrap(), - changelog_index: 0, + proof: merkle_proof_array, + changelog_index: indexed_changelog.len(), }; 
indexed_changelog.push(new_element_changelog_entry); + indexed_merkle_tree + .append(&new_element.value, &mut indexed_array) + .unwrap(); + println!( + "indexed_changelog {:?}", + indexed_changelog + .iter() + .map(|x| x.element) + .collect::>() + ); } } - let leaves_hashchain = hash_chain(&new_element_values); let hash_chain_inputs = vec![ current_root, new_root, leaves_hashchain, bigint_to_be_bytes_array::<32>(&next_index.into()).unwrap(), ]; + println!("hash_chain_inputs: {:?}", hash_chain_inputs); let public_input_hash = hash_chain(hash_chain_inputs.as_slice()); BatchAddressAppendInputs { - batch_size: new_element_values.len(), + batch_size: patched_low_element_values.len(), hashchain_hash: BigUint::from_bytes_be(&leaves_hashchain), - low_element_values: low_element_values + low_element_values: patched_low_element_values .iter() .map(|v| BigUint::from_bytes_be(v)) .collect(), - low_element_indices: low_element_indices + low_element_indices: patched_low_element_indices .iter() .map(|&i| BigUint::from(i)) .collect(), @@ -218,7 +265,7 @@ pub fn get_batch_address_append_inputs_from_tree( .map(|v| BigUint::from_bytes_be(v)) .collect(), low_element_proofs: low_element_circuit_merkle_proofs, - new_element_values: new_element_values + new_element_values: new_element_values[inserted_elements..] .iter() .map(|v| BigUint::from_bytes_be(v)) .collect(), @@ -328,29 +375,61 @@ pub fn get_test_batch_address_append_inputs( } } +/// Patch the indexed changelogs. +/// 1. find changelog entries of the same index +/// 2. iterate over entries +/// 2.1 if next_value < new_element.value patch element +/// 3. 
+#[inline(never)] pub fn patch_indexed_changelogs( indexed_changelog_index: usize, changelog_index: &mut usize, - indexed_changelogs: &mut Vec>, - low_element: &mut IndexedElement, - new_element: &mut IndexedElement, + indexed_changelogs: &mut Vec>, + low_element: &mut IndexedElement, + new_element: &mut IndexedElement, low_element_next_value: &mut BigUint, low_leaf_proof: &mut BoundedVec<[u8; 32]>, ) -> Result<(), IndexedMerkleTreeError> { + // println!( + // "indexed_changelog: {:?}", + // indexed_changelogs + // .iter() + // .map(|x| x.element) + // .collect::>() + // ); + // println!( + // "indexed_changelog: {:?}", + // indexed_changelogs + // .iter() + // .map(|x| x.changelog_index) + // .collect::>() + // ); + // println!("indexed_changelog_index: {}", indexed_changelog_index); let next_indexed_changelog_indices: Vec = (*indexed_changelogs) + [indexed_changelog_index..] .iter() + // .skip(1) .enumerate() .filter_map(|(index, changelog_entry)| { + // println!("low_element.index: {}", low_element.index); + // println!( + // "changelog_entry.element.index: {}", + // changelog_entry.element.index + // ); if changelog_entry.element.index == low_element.index { - Some((indexed_changelog_index + index) % indexed_changelogs.len()) + Some(indexed_changelog_index + index) // ) % indexed_changelogs.len() } else { None } }) .collect(); + // println!( + // "next_indexed_changelog_indices: {:?}", + // next_indexed_changelog_indices + // ); let mut new_low_element = None; - + // println!("new low element: {:?}", new_low_element); for next_indexed_changelog_index in next_indexed_changelog_indices { let changelog_entry = &mut indexed_changelogs[next_indexed_changelog_index]; @@ -360,15 +439,12 @@ pub fn patch_indexed_changelogs( // that it should become the low element. // // Save it and break the loop. 
- new_low_element = Some(( - (next_indexed_changelog_index + 1) % indexed_changelogs.len(), - next_element_value, - )); + new_low_element = Some(((next_indexed_changelog_index + 1), next_element_value)); break; } // Patch the changelog index. - *changelog_index = changelog_entry.changelog_index; + *changelog_index = changelog_entry.changelog_index + 1; // Patch the `next_index` of `new_element`. new_element.next_index = changelog_entry.element.next_index; @@ -385,7 +461,7 @@ pub fn patch_indexed_changelogs( // If we found a new low element. if let Some((new_low_element_changelog_index, new_low_element)) = new_low_element { let new_low_element_changelog_entry = &indexed_changelogs[new_low_element_changelog_index]; - *changelog_index = new_low_element_changelog_entry.changelog_index; + *changelog_index = new_low_element_changelog_entry.changelog_index + 1; *low_element = IndexedElement { index: new_low_element_changelog_entry.element.index, value: new_low_element.clone(), @@ -396,14 +472,28 @@ pub fn patch_indexed_changelogs( low_leaf_proof[i] = new_low_element_changelog_entry.proof[i]; } new_element.next_index = low_element.next_index; - + // println!( + // "recursing: new_low_element_changelog_index: {}", + // new_low_element_changelog_index + // ); + // println!("recursing: changelog_index: {}", changelog_index); + // // println!("recursing: indexed_changelogs: {:?}", indexed_changelogs); + // println!("recursing: low_element: {:?}", low_element); + // println!("recursing: new_element: {:?}", new_element); + // println!( + // "recursing: low_element_next_value: {:?}", + // low_element_next_value + // ); + if new_low_element_changelog_index == indexed_changelogs.len() - 1 { + return Ok(()); + } // Start the patching process from scratch for the new low element. patch_indexed_changelogs( new_low_element_changelog_index, changelog_index, indexed_changelogs, - new_element, low_element, + new_element, low_element_next_value, low_leaf_proof, )? 
@@ -411,3 +501,257 @@ pub fn patch_indexed_changelogs( Ok(()) } + +/// Performs conflicting Merkle tree updates where multiple actors try to add +/// add new ranges when using the same (for the most of actors - outdated) +/// Merkle proofs and changelog indices. +/// +/// Scenario: +/// +/// 1. Two paries start with the same indexed array state. +/// 2. Both parties compute their values with the same indexed Merkle tree +/// state. +/// 3. Party one inserts first. +/// 4. Party two needs to patch the low element, because the low element has +/// changed. +/// 5. Party two inserts. +/// 6. Party N needs to patch the low element, because the low element has +/// changed. +/// 7. Party N inserts. +/// +/// `DOUBLE_SPEND` indicates whether the provided addresses are an attempt to +/// double-spend by the subsequent parties. When set to `true`, we expect +/// subsequent updates to fail. +#[test] +fn test_indexed_changelog() { + use ark_std::rand::seq::SliceRandom; + use num_traits::FromPrimitive; + let rng = &mut ark_std::test_rng(); + for _ in 0..100 { + let mut indexed_array = IndexedArray::::default(); + indexed_array.init().unwrap(); + let mut indexed_merkle_tree = IndexedMerkleTree::::new(8, 0).unwrap(); + indexed_merkle_tree.init().unwrap(); + let mut man_indexed_array = IndexedArray::::default(); + man_indexed_array.init().unwrap(); + let mut addresses = vec![]; + for i in 2..100 { + let address = BigUint::from_usize(i).unwrap(); + addresses.push(address); + } + addresses.shuffle(rng); + + let next_index = indexed_merkle_tree.merkle_tree.rightmost_index; + let mut indexed_changelog: Vec> = Vec::new(); + let mut low_element_values = Vec::new(); + let mut low_element_indices = Vec::new(); + let mut low_element_next_indices = Vec::new(); + let mut low_element_next_values = Vec::new(); + let mut low_element_proofs: Vec> = Vec::new(); + // get inputs + for address in addresses.iter() { + let non_inclusion_proof = indexed_merkle_tree + .get_non_inclusion_proof(address, 
&indexed_array) + .unwrap(); + low_element_values.push(non_inclusion_proof.leaf_lower_range_value); + low_element_indices.push(non_inclusion_proof.leaf_index); + low_element_next_indices.push(non_inclusion_proof.next_index); + low_element_next_values.push(non_inclusion_proof.leaf_higher_range_value); + + low_element_proofs.push(non_inclusion_proof.merkle_proof.as_slice().to_vec()); + } + for i in 0..addresses.len() { + println!("\nunpatched {}-------------------", addresses[i]); + + let mut changelog_index = 0; + let new_element_index = next_index + i; + let mut low_element = IndexedElement { + index: low_element_indices[i], + value: BigUint::from_bytes_be(&low_element_values[i]), + next_index: low_element_next_indices[i], + }; + println!("unpatched low_element: {:?}", low_element); + let mut new_element = IndexedElement { + index: new_element_index, + value: addresses[i].clone(), + next_index: low_element_next_indices[i], + }; + println!("unpatched new_element: {:?}", new_element); + let mut low_element_proof = BoundedVec::from_slice(low_element_proofs[i].as_slice()); + let mut low_element_next_value = BigUint::from_bytes_be(&low_element_next_values[i]); + + if i > 0 { + patch_indexed_changelogs( + 0, + &mut changelog_index, + &mut indexed_changelog, + &mut low_element, + &mut new_element, + &mut low_element_next_value, + &mut low_element_proof, + ) + .unwrap(); + } + indexed_changelog.push(IndexedChangelogEntry { + element: RawIndexedElement { + value: bigint_to_be_bytes_array::<32>(&low_element.value).unwrap(), + next_index: new_element.index, + next_value: bigint_to_be_bytes_array::<32>(&new_element.value).unwrap(), + index: low_element.index, + }, + proof: low_element_proof.as_slice().to_vec().try_into().unwrap(), + changelog_index: indexed_changelog.len(), + }); + indexed_changelog.push(IndexedChangelogEntry { + element: RawIndexedElement { + value: bigint_to_be_bytes_array::<32>(&new_element.value).unwrap(), + next_index: new_element.next_index, + 
next_value: bigint_to_be_bytes_array::<32>(&low_element_next_value).unwrap(), + index: new_element.index, + }, + proof: low_element_proof.as_slice().to_vec().try_into().unwrap(), + changelog_index: indexed_changelog.len(), + }); + println!("patched -------------------"); + println!("changelog_index i: {}", changelog_index); + println!("low_element: {:?}", low_element); + println!("new_element: {:?}", new_element); + man_indexed_array.elements[low_element.index()] = low_element.clone(); + man_indexed_array.elements[low_element.index()].next_index = new_element.index; + man_indexed_array.elements.push(new_element); + if i > 0 { + let expected_low_element_value = + match addresses[0..i].iter().filter(|x| **x < addresses[i]).max() { + Some(x) => (*x).clone(), + None => BigUint::from_usize(0).unwrap(), + }; + assert_eq!(low_element.value, expected_low_element_value); + } + } + println!("indexed_changelog {:?}", indexed_changelog); + for address in addresses.iter() { + indexed_merkle_tree + .append(&address, &mut indexed_array) + .unwrap(); + } + println!("man_indexed_array {:?}", man_indexed_array); + println!("indexed_array {:?}", indexed_array); + + assert_eq!(indexed_array.elements, man_indexed_array.elements); + } +} + +#[test] +fn debug_test_indexed_changelog() { + use num_traits::FromPrimitive; + for _ in 0..1 { + let mut indexed_array = IndexedArray::::default(); + indexed_array.init().unwrap(); + let mut indexed_merkle_tree = IndexedMerkleTree::::new(8, 0).unwrap(); + indexed_merkle_tree.init().unwrap(); + let mut man_indexed_array = IndexedArray::::default(); + man_indexed_array.init().unwrap(); + let mut addresses = vec![]; + for i in 0..10 { + let address = BigUint::from_usize(101 - i).unwrap(); + addresses.push(address); + } + + let next_index = indexed_merkle_tree.merkle_tree.rightmost_index; + let mut indexed_changelog: Vec> = Vec::new(); + let mut low_element_values = Vec::new(); + let mut low_element_indices = Vec::new(); + let mut 
low_element_next_indices = Vec::new(); + let mut low_element_next_values = Vec::new(); + let mut low_element_proofs: Vec> = Vec::new(); + // get inputs + for address in addresses.iter() { + let non_inclusion_proof = indexed_merkle_tree + .get_non_inclusion_proof(address, &indexed_array) + .unwrap(); + low_element_values.push(non_inclusion_proof.leaf_lower_range_value); + low_element_indices.push(non_inclusion_proof.leaf_index); + low_element_next_indices.push(non_inclusion_proof.next_index); + low_element_next_values.push(non_inclusion_proof.leaf_higher_range_value); + low_element_proofs.push(non_inclusion_proof.merkle_proof.as_slice().to_vec()); + } + for i in 0..addresses.len() { + println!("\nunpatched {}-------------------", addresses[i]); + + let mut changelog_index = 0; + let new_element_index = next_index + i; + let mut low_element = IndexedElement { + index: low_element_indices[i], + value: BigUint::from_bytes_be(&low_element_values[i]), + next_index: low_element_next_indices[i], + }; + println!("unpatched low_element: {:?}", low_element); + let mut new_element = IndexedElement { + index: new_element_index, + value: addresses[i].clone(), + next_index: low_element_next_indices[i], + }; + println!("unpatched new_element: {:?}", new_element); + let mut low_element_proof = BoundedVec::from_slice(low_element_proofs[i].as_slice()); + let mut low_element_next_value = BigUint::from_bytes_be(&low_element_next_values[i]); + + if i > 0 { + patch_indexed_changelogs( + 0, + &mut changelog_index, + &mut indexed_changelog, + &mut low_element, + &mut new_element, + &mut low_element_next_value, + &mut low_element_proof, + ) + .unwrap(); + } + indexed_changelog.push(IndexedChangelogEntry { + element: RawIndexedElement { + value: bigint_to_be_bytes_array::<32>(&low_element.value).unwrap(), + next_index: new_element.index, + next_value: bigint_to_be_bytes_array::<32>(&new_element.value).unwrap(), + index: low_element.index, + }, + proof: 
low_element_proof.as_slice().to_vec().try_into().unwrap(), + changelog_index: indexed_changelog.len(), + }); + indexed_changelog.push(IndexedChangelogEntry { + element: RawIndexedElement { + value: bigint_to_be_bytes_array::<32>(&new_element.value).unwrap(), + next_index: new_element.next_index, + next_value: bigint_to_be_bytes_array::<32>(&low_element_next_value).unwrap(), + index: new_element.index, + }, + proof: low_element_proof.as_slice().to_vec().try_into().unwrap(), + changelog_index: indexed_changelog.len(), + }); + println!("patched -------------------"); + println!("changelog_index i: {}", changelog_index); + println!("low_element: {:?}", low_element); + println!("new_element: {:?}", new_element); + man_indexed_array.elements[low_element.index()] = low_element.clone(); + man_indexed_array.elements[low_element.index()].next_index = new_element.index; + man_indexed_array.elements.push(new_element); + if i > 0 { + let expected_low_element_value = + match addresses[0..i].iter().filter(|x| **x < addresses[i]).max() { + Some(x) => (*x).clone(), + None => BigUint::from_usize(0).unwrap(), + }; + assert_eq!(low_element.value, expected_low_element_value); + } + } + println!("indexed_changelog {:?}", indexed_changelog); + for address in addresses.iter() { + indexed_merkle_tree + .append(&address, &mut indexed_array) + .unwrap(); + } + println!("man_indexed_array {:?}", man_indexed_array); + println!("indexed_array {:?}", indexed_array); + + assert_eq!(indexed_array.elements, man_indexed_array.elements); + } +} diff --git a/circuit-lib/light-prover-client/src/mock_batched_forester.rs b/circuit-lib/light-prover-client/src/mock_batched_forester.rs index 917508cf01..1b611cf9e1 100644 --- a/circuit-lib/light-prover-client/src/mock_batched_forester.rs +++ b/circuit-lib/light-prover-client/src/mock_batched_forester.rs @@ -1,15 +1,19 @@ use std::fmt::Error; use light_hasher::{Hasher, Poseidon}; +use light_indexed_merkle_tree::{array::IndexedArray, 
reference::IndexedMerkleTree}; use light_merkle_tree_reference::MerkleTree; use light_utils::bigint::bigint_to_be_bytes_array; +use num_bigint::BigUint; use reqwest::Client; use crate::{ + batch_address_append::get_batch_address_append_circuit_inputs, batch_append_with_proofs::get_batch_append_with_proofs_inputs, batch_append_with_subtrees::calculate_hash_chain, batch_update::get_batch_update_inputs, gnark::{ + batch_address_append_json_formatter::to_json, batch_append_with_proofs_json_formatter::BatchAppendWithProofsInputsJson, batch_update_json_formatter::update_inputs_string, constants::{PROVE_PATH, SERVER_ADDRESS}, @@ -227,3 +231,147 @@ pub struct CompressedProof { pub b: [u8; 64], pub c: [u8; 32], } + +#[derive(Clone, Debug)] +pub struct MockBatchedAddressForester { + pub merkle_tree: IndexedMerkleTree, + pub queue_leaves: Vec<[u8; 32]>, + pub indexed_array: IndexedArray, +} +impl Default for MockBatchedAddressForester { + fn default() -> Self { + let mut merkle_tree = IndexedMerkleTree::::new(HEIGHT, 0).unwrap(); + merkle_tree.init().unwrap(); + let queue_leaves = vec![]; + let mut indexed_array = IndexedArray::::default(); + indexed_array.init().unwrap(); + Self { + merkle_tree, + queue_leaves, + indexed_array, + } + } +} + +impl MockBatchedAddressForester { + pub async fn get_batched_address_proof( + &mut self, + batch_size: u32, + zkp_batch_size: u32, + leaves_hashchain: [u8; 32], + start_index: usize, + batch_start_index: usize, + current_root: [u8; 32], + ) -> Result<(CompressedProof, [u8; 32]), Error> { + println!("batch size {:?}", batch_size); + println!("start index {:?}", start_index); + println!("batch start index {:?}", batch_start_index); + let new_element_values = self.queue_leaves[..batch_size as usize].to_vec(); + // for _ in 0..batch_size { + // self.queue_leaves.remove(0); + // } + assert_eq!( + self.merkle_tree.merkle_tree.rightmost_index, + batch_start_index + ); + assert!( + batch_start_index >= 2, + "start index should be greater than 2 
else tree is not inited" + ); + // let current_root = self.merkle_tree.root(); + println!("new element values {:?}", new_element_values); + let mut low_element_values = Vec::new(); + let mut low_element_indices = Vec::new(); + let mut low_element_next_indices = Vec::new(); + let mut low_element_next_values = Vec::new(); + let mut low_element_proofs: Vec> = Vec::new(); + + for new_element_value in &new_element_values { + println!("new element value {:?}", new_element_value); + let non_inclusion_proof = self + .merkle_tree + .get_non_inclusion_proof( + &BigUint::from_bytes_be(new_element_value.as_slice()), + &self.indexed_array, + ) + .unwrap(); + + low_element_values.push(non_inclusion_proof.leaf_lower_range_value); + low_element_indices.push(non_inclusion_proof.leaf_index); + low_element_next_indices.push(non_inclusion_proof.next_index); + low_element_next_values.push(non_inclusion_proof.leaf_higher_range_value); + + low_element_proofs.push(non_inclusion_proof.merkle_proof.as_slice().to_vec()); + } + // // local_leaves_hashchain is only used for a test assertion. 
+ // let local_nullifier_hashchain = calculate_hash_chain(&new_element_values); + // assert_eq!(leaves_hashchain, local_nullifier_hashchain); + let inputs = get_batch_address_append_circuit_inputs::( + start_index, + current_root, + low_element_values, + low_element_next_values, + low_element_indices, + low_element_next_indices, + low_element_proofs, + new_element_values.clone(), + self.merkle_tree + .merkle_tree + .get_subtrees() + .try_into() + .unwrap(), + leaves_hashchain, + batch_start_index, + zkp_batch_size as usize, + ); + println!("inputs {:?}", inputs); + let client = Client::new(); + let circuit_inputs_new_root = bigint_to_be_bytes_array::<32>(&inputs.new_root).unwrap(); + let inputs = to_json(&inputs); + + // let new_root = self.merkle_tree.root(); + + let response_result = client + .post(&format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) + .header("Content-Type", "text/plain; charset=utf-8") + .body(inputs) + .send() + .await + .expect("Failed to execute request."); + + // assert_eq!(circuit_inputs_new_root, new_root); + + if response_result.status().is_success() { + let body = response_result.text().await.unwrap(); + let proof_json = deserialize_gnark_proof_json(&body).unwrap(); + let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); + let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); + return Ok(( + CompressedProof { + a: proof_a, + b: proof_b, + c: proof_c, + }, + circuit_inputs_new_root, + )); + } + println!("response result {:?}", response_result); + Err(Error) + } + + pub fn finalize_batch_address_update(&mut self, batch_size: usize) { + println!("finalize batch address update"); + let new_element_values = self.queue_leaves[..batch_size].to_vec(); + for _ in 0..batch_size { + self.queue_leaves.remove(0); + } + for new_element_value in &new_element_values { + self.merkle_tree + .append( + &BigUint::from_bytes_be(new_element_value), + &mut self.indexed_array, + ) + .unwrap(); + } + } +} diff --git 
a/circuit-lib/light-prover-client/tests/gnark.rs b/circuit-lib/light-prover-client/tests/gnark.rs index ed0fe1a303..20b1fe9f93 100644 --- a/circuit-lib/light-prover-client/tests/gnark.rs +++ b/circuit-lib/light-prover-client/tests/gnark.rs @@ -10,7 +10,7 @@ use light_prover_client::gnark::batch_append_with_subtrees_json_formatter::appen use light_prover_client::gnark::batch_update_json_formatter::update_inputs_string; use light_prover_client::{ batch_address_append::{ - get_batch_address_append_inputs_from_tree, get_test_batch_address_append_inputs, + get_batch_address_append_circuit_inputs, get_test_batch_address_append_inputs, }, gnark::batch_address_append_json_formatter::to_json, }; @@ -338,7 +338,7 @@ pub fn print_circuit_test_data_with_existing_tree() { IndexedMerkleTree::::new(TREE_HEIGHT, 0).unwrap(); relayer_merkle_tree.init().unwrap(); - let start_index = relayer_merkle_tree.merkle_tree.rightmost_index; + let next_index = relayer_merkle_tree.merkle_tree.rightmost_index; let current_root = relayer_merkle_tree.root(); @@ -364,10 +364,11 @@ pub fn print_circuit_test_data_with_existing_tree() { let new_element_values = new_element_values .iter() .map(|v| bigint_to_be_bytes_array::<32>(&v).unwrap()) - .collect(); + .collect::>(); + let hash_chain = calculate_hash_chain(&new_element_values); - let inputs = get_batch_address_append_inputs_from_tree::( - start_index, + let inputs = get_batch_address_append_circuit_inputs::( + next_index, current_root, low_element_values, low_element_next_values, @@ -380,6 +381,9 @@ pub fn print_circuit_test_data_with_existing_tree() { .get_subtrees() .try_into() .unwrap(), + hash_chain, + 0, + 2, ); let json_output = to_json(&inputs); @@ -456,7 +460,7 @@ async fn prove_batch_address_append() { init_logger(); println!("spawning prover"); spawn_prover( - true, + false, ProverConfig { run_mode: None, circuits: vec![ProofType::BatchAddressAppendTest], @@ -503,10 +507,11 @@ async fn prove_batch_address_append() { let 
new_element_values = new_element_values .iter() .map(|v| bigint_to_be_bytes_array::<32>(v).unwrap()) - .collect(); + .collect::>(); + let hash_chain = calculate_hash_chain(&new_element_values); // Generate circuit inputs - let inputs = get_batch_address_append_inputs_from_tree::( + let inputs = get_batch_address_append_circuit_inputs::( start_index, current_root, low_element_values, @@ -520,11 +525,12 @@ async fn prove_batch_address_append() { .get_subtrees() .try_into() .unwrap(), + hash_chain, + 0, + 10, ); - // Convert inputs to JSON format let inputs_json = to_json(&inputs); - // Send proof request to server let client = Client::new(); let response_result = client diff --git a/circuit-lib/verifier/src/lib.rs b/circuit-lib/verifier/src/lib.rs index e56bfa030e..df665d4e54 100644 --- a/circuit-lib/verifier/src/lib.rs +++ b/circuit-lib/verifier/src/lib.rs @@ -173,6 +173,7 @@ pub fn verify_merkle_proof_zkp( leaves: &[[u8; 32]], compressed_proof: &CompressedProof, ) -> Result<(), VerifierError> { + // TODO: replace with poseidon hash which avoids copying the data let public_inputs = [roots, leaves].concat(); // The public inputs are expected to be a multiple of 2 @@ -354,3 +355,39 @@ pub fn verify_batch_update( _ => Err(crate::InvalidPublicInputsLength), } } + +#[inline(never)] +pub fn verify_batch_address_update( + batch_size: usize, + public_input_hash: [u8; 32], + compressed_proof: &CompressedProof, +) -> Result<(), VerifierError> { + match batch_size { + 1 => verify::<1>( + &[public_input_hash], + compressed_proof, + &crate::verifying_keys::address_append_26_1::VERIFYINGKEY, + ), + 10 => verify::<1>( + &[public_input_hash], + compressed_proof, + &crate::verifying_keys::address_append_26_10::VERIFYINGKEY, + ), + 100 => verify::<1>( + &[public_input_hash], + compressed_proof, + &crate::verifying_keys::address_append_26_100::VERIFYINGKEY, + ), + 500 => verify::<1>( + &[public_input_hash], + compressed_proof, + 
&crate::verifying_keys::address_append_26_500::VERIFYINGKEY, + ), + 1000 => verify::<1>( + &[public_input_hash], + compressed_proof, + &crate::verifying_keys::address_append_26_1000::VERIFYINGKEY, + ), + _ => Err(crate::InvalidPublicInputsLength), + } +} diff --git a/examples/name-service/programs/name-service/tests/test.rs b/examples/name-service/programs/name-service/tests/test.rs index cae0f49646..d29ccb63f3 100644 --- a/examples/name-service/programs/name-service/tests/test.rs +++ b/examples/name-service/programs/name-service/tests/test.rs @@ -67,8 +67,9 @@ async fn test_name_service() { address_queue_pubkey: env.address_merkle_tree_queue_pubkey, }; - let address_seed = derive_address_seed(&[b"name-service", name.as_bytes()], &name_service::ID); - let address = derive_address(&address_seed, &address_merkle_context); + let address_seed = + derive_address_legacy_seed(&[b"name-service", name.as_bytes()], &name_service::ID); + let address = derive_address_legacy(&address_seed, &address_merkle_context); let address_merkle_context = pack_address_merkle_context(address_merkle_context, &mut remaining_accounts); diff --git a/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/escrow.rs b/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/escrow.rs index afd0c0f8b3..4062a3313e 100644 --- a/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/escrow.rs +++ b/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/escrow.rs @@ -15,7 +15,7 @@ use light_sdk::{ use light_system_program::{ invoke::processor::CompressedProof, sdk::{ - address::derive_address, + address::derive_address_legacy, compressed_account::{CompressedAccount, CompressedAccountData, PackedMerkleContext}, CompressedCpiContext, }, @@ -137,7 +137,7 @@ fn create_compressed_pda_data( .hash::() .map_err(ProgramError::from)?, }; - let derive_address = derive_address( + let derive_address = derive_address_legacy( 
&ctx.remaining_accounts[new_address_params.address_merkle_tree_account_index as usize] .key(), &new_address_params.seed, diff --git a/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs b/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs index 64c5b39410..7bac13a9b6 100644 --- a/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs +++ b/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs @@ -17,7 +17,7 @@ use anchor_lang::AnchorDeserialize; use light_hasher::{Hasher, Poseidon}; use light_program_test::test_env::{setup_test_programs_with_accounts, EnvAccounts}; use light_prover_client::gnark::helpers::{ProverConfig, ProverMode}; -use light_system_program::sdk::address::derive_address; +use light_system_program::sdk::address::derive_address_legacy; use light_system_program::sdk::compressed_account::MerkleContext; use light_system_program::sdk::event::PublicTransactionEvent; use light_system_program::NewAddressParams; @@ -233,7 +233,7 @@ async fn create_escrow_ix( ) .unwrap(); - let address = derive_address(&env.address_merkle_tree_pubkey, &seed).unwrap(); + let address = derive_address_legacy(&env.address_merkle_tree_pubkey, &seed).unwrap(); let rpc_result = test_indexer .create_proof_for_compressed_accounts( @@ -316,7 +316,7 @@ pub async fn assert_escrow( .find(|x| x.compressed_account.owner == token_escrow::ID) .unwrap() .clone(); - let address = derive_address(&env.address_merkle_tree_pubkey, seed).unwrap(); + let address = derive_address_legacy(&env.address_merkle_tree_pubkey, seed).unwrap(); assert_eq!( compressed_escrow_pda.compressed_account.address.unwrap(), address @@ -536,7 +536,7 @@ pub async fn assert_withdrawal( .unwrap() .clone(); - let address = derive_address(&env.address_merkle_tree_pubkey, seed).unwrap(); + let address = derive_address_legacy(&env.address_merkle_tree_pubkey, seed).unwrap(); assert_eq!( compressed_escrow_pda.compressed_account.address.unwrap(), 
address diff --git a/forester-utils/src/indexer/mod.rs b/forester-utils/src/indexer/mod.rs index c1659a0a1f..8dc58130bd 100644 --- a/forester-utils/src/indexer/mod.rs +++ b/forester-utils/src/indexer/mod.rs @@ -69,9 +69,27 @@ pub struct AddressMerkleTreeBundle { pub merkle_tree: Box>, pub indexed_array: Box>, pub accounts: AddressMerkleTreeAccounts, + pub queue_elements: Vec<[u8; 32]>, } pub trait Indexer: Sync + Send + Debug + 'static { + /// Returns queue elements from the queue with the given pubkey. For input + /// queues account compression program does not store queue elements in the + /// account data but only emits these in the public transaction event. The + /// indexer needs the queue elements to create batch update proofs. + fn get_queue_elements( + &self, + pubkey: [u8; 32], + batch: u64, + start_offset: u64, + end_offset: u64, + ) -> impl std::future::Future, IndexerError>> + Send + Sync; + + fn get_subtrees( + &self, + merkle_tree_pubkey: [u8; 32], + ) -> impl std::future::Future, IndexerError>> + Send + Sync; + fn get_multiple_compressed_account_proofs( &self, hashes: Vec, @@ -86,7 +104,14 @@ pub trait Indexer: Sync + Send + Debug + 'static { &self, merkle_tree_pubkey: [u8; 32], addresses: Vec<[u8; 32]>, - ) -> impl std::future::Future, IndexerError>> + ) -> impl std::future::Future>, IndexerError>> + + Send + + Sync; + fn get_multiple_new_address_proofs_full( + &self, + merkle_tree_pubkey: [u8; 32], + addresses: Vec<[u8; 32]>, + ) -> impl std::future::Future>, IndexerError>> + Send + Sync; @@ -95,7 +120,7 @@ pub trait Indexer: Sync + Send + Debug + 'static { fn address_tree_updated( &mut self, _merkle_tree_pubkey: Pubkey, - _context: &NewAddressProofWithContext, + _context: &NewAddressProofWithContext<16>, ) { } @@ -201,8 +226,8 @@ pub struct MerkleProof { } // For consistency with the Photon API. 
-#[derive(Clone, Default, Debug, PartialEq)] -pub struct NewAddressProofWithContext { +#[derive(Clone, Debug, PartialEq)] +pub struct NewAddressProofWithContext { pub merkle_tree: [u8; 32], pub root: [u8; 32], pub root_seq: u64, @@ -210,7 +235,7 @@ pub struct NewAddressProofWithContext { pub low_address_value: [u8; 32], pub low_address_next_index: u64, pub low_address_next_value: [u8; 32], - pub low_address_proof: [[u8; 32]; 16], + pub low_address_proof: [[u8; 32]; NET_HEIGHT], pub new_low_element: Option>, pub new_element: Option>, pub new_element_next_value: Option, diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index 603be0be45..adae6e9ccd 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -68,7 +68,7 @@ impl WorkItem { #[allow(clippy::large_enum_variant)] #[derive(Debug, Clone)] pub enum MerkleProofType { - AddressProof(NewAddressProofWithContext), + AddressProof(NewAddressProofWithContext<16>), StateProof(MerkleProof), } diff --git a/forester/src/photon_indexer.rs b/forester/src/photon_indexer.rs index 27dbc03ca1..056bf750c4 100644 --- a/forester/src/photon_indexer.rs +++ b/forester/src/photon_indexer.rs @@ -38,6 +38,28 @@ impl Debug for PhotonIndexer { } impl Indexer for PhotonIndexer { + async fn get_queue_elements( + &self, + _pubkey: [u8; 32], + _batch: u64, + _start_offset: u64, + _end_offset: u64, + ) -> Result, IndexerError> { + unimplemented!() + } + async fn get_subtrees( + &self, + _merkle_tree_pubkey: [u8; 32], + ) -> Result, IndexerError> { + unimplemented!() + } + async fn get_multiple_new_address_proofs_full( + &self, + _merkle_tree_pubkey: [u8; 32], + _addresses: Vec<[u8; 32]>, + ) -> Result>, IndexerError> { + unimplemented!() + } async fn get_multiple_compressed_account_proofs( &self, hashes: Vec, @@ -124,7 +146,7 @@ impl Indexer for PhotonIndexer { &self, merkle_tree_pubkey: [u8; 32], addresses: Vec<[u8; 32]>, - ) -> Result, IndexerError> { + ) -> Result>, IndexerError> { let params: 
Vec = addresses .iter() .map(|x| AddressWithTree { @@ -153,7 +175,8 @@ impl Indexer for PhotonIndexer { } let photon_proofs = result.unwrap().result.unwrap().value; - let mut proofs: Vec = Vec::new(); + // net height 16 = height(26) - canopy(10) + let mut proofs: Vec> = Vec::new(); for photon_proof in photon_proofs { let tree_pubkey = decode_hash(&photon_proof.merkle_tree); let low_address_value = decode_hash(&photon_proof.lower_range_address); diff --git a/forester/src/rollover/operations.rs b/forester/src/rollover/operations.rs index b24c3edcfa..e319126f1c 100644 --- a/forester/src/rollover/operations.rs +++ b/forester/src/rollover/operations.rs @@ -101,7 +101,7 @@ pub async fn get_tree_fullness( let threshold = ((1 << height) * queue_account.metadata.rollover_metadata.rollover_threshold / 100) as usize; - let next_index = merkle_tree.next_index() - 3; + let next_index = merkle_tree.next_index().saturating_sub(3); let fullness = next_index as f64 / capacity as f64; Ok(TreeInfo { diff --git a/forester/tests/test_utils.rs b/forester/tests/test_utils.rs index cc9272f18c..ac06b5f6d6 100644 --- a/forester/tests/test_utils.rs +++ b/forester/tests/test_utils.rs @@ -127,9 +127,9 @@ pub async fn assert_new_address_proofs_for_photon_and_test_indexer = address_proof_photon.unwrap().first().unwrap().clone(); - let test_indexer_result: NewAddressProofWithContext = + let test_indexer_result: NewAddressProofWithContext<16> = address_proof_test_indexer.unwrap().first().unwrap().clone(); debug!( "assert proofs for address: {} photon result: {:?} test indexer result: {:?}", diff --git a/light-program-test/src/test_env.rs b/light-program-test/src/test_env.rs index 17c40c1deb..c00bbb5033 100644 --- a/light-program-test/src/test_env.rs +++ b/light-program-test/src/test_env.rs @@ -9,7 +9,8 @@ use account_compression::{ sdk::create_initialize_merkle_tree_instruction, GroupAuthority, RegisteredProgram, }; use account_compression::{ - AddressMerkleTreeConfig, AddressQueueConfig, 
InitStateTreeAccountsInstructionData, QueueType, + AddressMerkleTreeConfig, AddressQueueConfig, InitAddressTreeAccountsInstructionData, + InitStateTreeAccountsInstructionData, QueueType, }; use account_compression::{NullifierQueueConfig, StateMerkleTreeConfig}; use forester_utils::forester_epoch::{Epoch, TreeAccounts, TreeType}; @@ -143,6 +144,7 @@ pub struct EnvAccounts { pub batched_state_merkle_tree: Pubkey, pub batched_output_queue: Pubkey, pub batched_cpi_context: Pubkey, + pub batch_address_merkle_tree: Pubkey, } impl EnvAccounts { @@ -166,6 +168,7 @@ impl EnvAccounts { batched_state_merkle_tree: pubkey!("HLKs5NJ8FXkJg8BrzJt56adFYYuwg5etzDtBbQYTsixu"), batched_output_queue: pubkey!("6L7SzhYB3anwEQ9cphpJ1U7Scwj57bx2xueReg7R9cKU"), batched_cpi_context: pubkey!("7Hp52chxaew8bW1ApR4fck2bh6Y8qA1pu3qwH6N9zaLj"), + batch_address_merkle_tree: pubkey!("EzKE84aVTkCUhDHLELqyJaq1Y7UVVmqxXqZjVHwHY3rK"), } } } @@ -184,6 +187,7 @@ pub struct EnvAccountKeypairs { pub batched_state_merkle_tree: Keypair, pub batched_output_queue: Keypair, pub batched_cpi_context: Keypair, + pub batch_address_merkle_tree: Keypair, } impl EnvAccountKeypairs { @@ -203,6 +207,10 @@ impl EnvAccountKeypairs { .unwrap(), batched_output_queue: Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR).unwrap(), batched_cpi_context: Keypair::from_bytes(&BATCHED_CPI_CONTEXT_TEST_KEYPAIR).unwrap(), + batch_address_merkle_tree: Keypair::from_bytes( + &BATCHED_ADDRESS_MERKLE_TREE_TEST_KEYPAIR, + ) + .unwrap(), } } @@ -264,6 +272,10 @@ impl EnvAccountKeypairs { .unwrap(), batched_output_queue: Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR).unwrap(), batched_cpi_context: Keypair::from_bytes(&BATCHED_CPI_CONTEXT_TEST_KEYPAIR).unwrap(), + batch_address_merkle_tree: Keypair::from_bytes( + &BATCHED_ADDRESS_MERKLE_TREE_TEST_KEYPAIR, + ) + .unwrap(), } } } @@ -361,6 +373,13 @@ pub const BATCHED_CPI_CONTEXT_TEST_KEYPAIR: [u8; 64] = [ 3, 12, 228, ]; +// EzKE84aVTkCUhDHLELqyJaq1Y7UVVmqxXqZjVHwHY3rK +pub const 
BATCHED_ADDRESS_MERKLE_TREE_TEST_KEYPAIR: [u8; 64] = [ + 39, 24, 219, 214, 174, 34, 141, 22, 238, 96, 128, 5, 244, 12, 239, 3, 45, 61, 42, 53, 92, 87, + 28, 24, 35, 87, 72, 11, 158, 224, 210, 70, 207, 214, 165, 6, 152, 46, 60, 129, 118, 32, 27, + 128, 68, 73, 71, 250, 6, 83, 176, 199, 153, 140, 237, 11, 55, 237, 3, 179, 242, 138, 37, 12, +]; + /// Setup test programs with accounts /// deploys: /// 1. light program @@ -439,6 +458,7 @@ pub async fn setup_test_programs_with_accounts_with_protocol_config( protocol_config, register_forester_and_advance_to_active_phase, InitStateTreeAccountsInstructionData::test_default(), + InitAddressTreeAccountsInstructionData::test_default(), ) .await } @@ -448,6 +468,7 @@ pub async fn setup_test_programs_with_accounts_with_protocol_config_and_batched_ protocol_config: ProtocolConfig, register_forester_and_advance_to_active_phase: bool, batched_tree_init_params: InitStateTreeAccountsInstructionData, + batched_address_tree_init_params: InitAddressTreeAccountsInstructionData, ) -> (ProgramTestRpcConnection, EnvAccounts) { let context = setup_test_programs(additional_programs).await; let mut context = ProgramTestRpcConnection { context }; @@ -469,6 +490,7 @@ pub async fn setup_test_programs_with_accounts_with_protocol_config_and_batched_ register_forester_and_advance_to_active_phase, true, batched_tree_init_params, + batched_address_tree_init_params, ) .await; (context, env_accounts) @@ -501,6 +523,7 @@ pub async fn setup_test_programs_with_accounts_with_protocol_config_v2( register_forester_and_advance_to_active_phase, true, params, + InitAddressTreeAccountsInstructionData::test_default(), ) .await; (context, env_accounts) @@ -517,6 +540,7 @@ pub async fn setup_accounts(keypairs: EnvAccountKeypairs, url: SolanaRpcUrl) -> false, false, params, + InitAddressTreeAccountsInstructionData::test_default(), ) .await } @@ -528,6 +552,7 @@ pub async fn initialize_accounts( register_forester_and_advance_to_active_phase: bool, 
skip_register_programs: bool, batched_tree_init_params: InitStateTreeAccountsInstructionData, + batched_address_tree_init_params: InitAddressTreeAccountsInstructionData, ) -> EnvAccounts { let cpi_authority_pda = get_cpi_authority_pda(); let protocol_config_pda = get_protocol_config_pda_address(); @@ -633,6 +658,23 @@ pub async fn initialize_accounts( .await .unwrap(); + create_batch_address_merkle_tree( + context, + &keypairs.governance_authority, + &keypairs.batch_address_merkle_tree, + batched_address_tree_init_params, + ) + .await + .unwrap(); + assert_registry_created_batched_address_merkle_tree( + context, + get_group_pda(group_seed_keypair.pubkey()), + keypairs.batch_address_merkle_tree.pubkey(), + batched_address_tree_init_params, + ) + .await + .unwrap(); + create_address_merkle_tree_and_queue_account( &keypairs.governance_authority, true, @@ -713,6 +755,7 @@ pub async fn initialize_accounts( batched_cpi_context: keypairs.batched_cpi_context.pubkey(), batched_output_queue: keypairs.batched_output_queue.pubkey(), batched_state_merkle_tree: keypairs.batched_state_merkle_tree.pubkey(), + batch_address_merkle_tree: keypairs.batch_address_merkle_tree.pubkey(), } } pub fn get_group_pda(seed: Pubkey) -> Pubkey { @@ -813,6 +856,9 @@ pub fn get_test_env_accounts() -> EnvAccounts { batched_state_merkle_tree: Keypair::from_bytes(&BATCHED_STATE_MERKLE_TREE_TEST_KEYPAIR) .unwrap() .pubkey(), + batch_address_merkle_tree: Keypair::from_bytes(&BATCHED_ADDRESS_MERKLE_TREE_TEST_KEYPAIR) + .unwrap() + .pubkey(), } } diff --git a/light-prover/prover/batch_address_append_circuit.go b/light-prover/prover/batch_address_append_circuit.go index e3e07baee4..36f4590137 100644 --- a/light-prover/prover/batch_address_append_circuit.go +++ b/light-prover/prover/batch_address_append_circuit.go @@ -293,6 +293,7 @@ func (ps *ProvingSystemV2) ProveBatchAddressAppend(params *BatchAddressAppendPar if params == nil { panic("params cannot be nil") } + fmt.Printf(" params: %+v\n", params) if 
err := params.ValidateShape(); err != nil { return nil, err } diff --git a/light-prover/prover/marshal_batch_address_append.go b/light-prover/prover/marshal_batch_address_append.go index 8a45113df3..4a894c6c31 100644 --- a/light-prover/prover/marshal_batch_address_append.go +++ b/light-prover/prover/marshal_batch_address_append.go @@ -2,6 +2,7 @@ package prover import ( "encoding/json" + "fmt" "math/big" ) @@ -160,7 +161,7 @@ func (p *BatchAddressAppendParameters) UpdateWithJSON(params BatchAddressAppendP if err != nil { return err } - + fmt.Printf("params: %v\n", params) return nil } diff --git a/merkle-tree/indexed/src/array.rs b/merkle-tree/indexed/src/array.rs index e0a76bf194..6641061eb4 100644 --- a/merkle-tree/indexed/src/array.rs +++ b/merkle-tree/indexed/src/array.rs @@ -39,6 +39,8 @@ where { fn eq(&self, other: &Self) -> bool { self.value == other.value + && self.index == other.index + && self.next_index == other.next_index } } @@ -86,6 +88,9 @@ where where H: Hasher, { + println!("self.value: {:?}", self.value); + println!("self.next_index: {:?}", self.next_index()); + println!("next_value: {:?}", next_value); let hash = H::hashv(&[ bigint_to_be_bytes_array::<32>(&self.value)?.as_ref(), self.next_index.to_be_bytes().as_ref(), @@ -476,9 +481,9 @@ mod test { next_index: 1, }; let element_2 = IndexedElement:: { - index: 1, + index: 0, value, - next_index: 2, + next_index: 1, }; assert_eq!(element_1, element_2); assert_eq!(element_2, element_1); @@ -765,7 +770,7 @@ mod test { next_index: 3, }, &IndexedElement { - index: 2, + index: 3, value: 20_u32.to_biguint().unwrap(), next_index: 1 } diff --git a/merkle-tree/indexed/src/lib.rs b/merkle-tree/indexed/src/lib.rs index d332337f5d..b404734ae6 100644 --- a/merkle-tree/indexed/src/lib.rs +++ b/merkle-tree/indexed/src/lib.rs @@ -344,6 +344,7 @@ where value: new_element_value, next_index: low_element.next_index, }; + println!("low_element: {:?}", low_element); self.patch_elements_and_proof( 
indexed_changelog_index, @@ -353,6 +354,7 @@ where &mut low_element_next_value, low_leaf_proof, )?; + println!("patched low_element: {:?}", low_element); // Check that the value of `new_element` belongs to the range // of `old_low_element`. if low_element.next_index == I::zero() { diff --git a/merkle-tree/indexed/src/reference.rs b/merkle-tree/indexed/src/reference.rs index 9a61eccb9e..7861d02c2a 100644 --- a/merkle-tree/indexed/src/reference.rs +++ b/merkle-tree/indexed/src/reference.rs @@ -111,12 +111,15 @@ where ) -> Result<(), IndexedReferenceMerkleTreeError> { // Update the low element. let new_low_leaf = new_low_element.hash::(&new_element.value)?; + println!("reference update new low leaf hash {:?}", new_low_leaf); self.merkle_tree .update(&new_low_leaf, usize::from(new_low_element.index))?; - + println!("reference updated root {:?}", self.merkle_tree.root()); // Append the new element. let new_leaf = new_element.hash::(new_element_next_value)?; + println!("reference update new leaf hash {:?}", new_leaf); self.merkle_tree.append(&new_leaf)?; + println!("reference appended root {:?}", self.merkle_tree.root()); Ok(()) } @@ -127,6 +130,7 @@ where value: &BigUint, indexed_array: &mut IndexedArray, ) -> Result<(), IndexedReferenceMerkleTreeError> { + println!("appending {:?}", value); let nullifier_bundle = indexed_array.append(value).unwrap(); self.update( &nullifier_bundle.new_low_element, diff --git a/merkle-tree/reference/src/lib.rs b/merkle-tree/reference/src/lib.rs index ae59b6442d..2a67a702c5 100644 --- a/merkle-tree/reference/src/lib.rs +++ b/merkle-tree/reference/src/lib.rs @@ -100,6 +100,10 @@ where self.layers[0].push(*leaf); let i = self.rightmost_index; + if self.rightmost_index == self.capacity { + println!("Merkle tree full"); + return Err(HasherError::IntegerOverflow); + } self.rightmost_index += 1; self.update_upper_layers(i)?; diff --git a/programs/account-compression/src/errors.rs b/programs/account-compression/src/errors.rs index 
45a2077470..d141ebe76d 100644 --- a/programs/account-compression/src/errors.rs +++ b/programs/account-compression/src/errors.rs @@ -78,4 +78,6 @@ pub enum AccountCompressionErrorCode { UnsupportedParameters, InvalidTreeType, InvalidNetworkFee, + AddressMerkleTreeAccountDiscriminatorMismatch, + StateMerkleTreeAccountDiscriminatorMismatch, } diff --git a/programs/account-compression/src/instructions/append_leaves.rs b/programs/account-compression/src/instructions/append_leaves.rs index d20378475d..f85653e850 100644 --- a/programs/account-compression/src/instructions/append_leaves.rs +++ b/programs/account-compression/src/instructions/append_leaves.rs @@ -123,7 +123,11 @@ fn batch_append_leaves<'a, 'c: 'info, 'info>( BatchedQueueAccount::DISCRIMINATOR => { append_v2(ctx, merkle_tree_acc_info, batch_size, &leaves[start..end])? } - _ => return err!(anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch), + _ => { + return err!( + AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch + ) + } } }; transfer_lamports_cpi(&ctx.accounts.fee_payer, merkle_tree_acc_info, rollover_fee)?; diff --git a/programs/account-compression/src/instructions/batch_append.rs b/programs/account-compression/src/instructions/batch_append.rs index cddc9069f1..09ab2af1a7 100644 --- a/programs/account-compression/src/instructions/batch_append.rs +++ b/programs/account-compression/src/instructions/batch_append.rs @@ -37,8 +37,9 @@ pub fn process_batch_append_leaves<'a, 'b, 'c: 'info, 'info>( ctx: &'a Context<'a, 'b, 'c, 'info, BatchAppend<'info>>, instruction_data: InstructionDataBatchAppendInputs, ) -> Result<()> { - let account_data = &mut ctx.accounts.merkle_tree.try_borrow_mut_data()?; - let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data)?; + let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::state_tree_from_account_info_mut( + &ctx.accounts.merkle_tree, + )?; check_signer_is_registered_or_authority::( ctx, merkle_tree, diff --git 
a/programs/account-compression/src/instructions/batch_nullify.rs b/programs/account-compression/src/instructions/batch_nullify.rs index ba21ca0aa7..dd74c98632 100644 --- a/programs/account-compression/src/instructions/batch_nullify.rs +++ b/programs/account-compression/src/instructions/batch_nullify.rs @@ -33,8 +33,9 @@ pub fn process_batch_nullify<'a, 'b, 'c: 'info, 'info>( ctx: &'a Context<'a, 'b, 'c, 'info, BatchNullify<'info>>, instruction_data: InstructionDataBatchNullifyInputs, ) -> Result<()> { - let account_data = &mut ctx.accounts.merkle_tree.try_borrow_mut_data()?; - let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data)?; + let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::state_tree_from_account_info_mut( + &ctx.accounts.merkle_tree, + )?; check_signer_is_registered_or_authority::( ctx, merkle_tree, diff --git a/programs/account-compression/src/instructions/batch_update_address_tree.rs b/programs/account-compression/src/instructions/batch_update_address_tree.rs new file mode 100644 index 0000000000..539d3f9eb2 --- /dev/null +++ b/programs/account-compression/src/instructions/batch_update_address_tree.rs @@ -0,0 +1,46 @@ +use crate::{ + batched_merkle_tree::{InstructionDataBatchNullifyInputs, ZeroCopyBatchedMerkleTreeAccount}, + emit_indexer_event, + utils::check_signer_is_registered_or_authority::{ + check_signer_is_registered_or_authority, GroupAccounts, + }, + RegisteredProgram, +}; +use anchor_lang::prelude::*; + +#[derive(Accounts)] +pub struct BatchUpdateAddressTree<'info> { + /// CHECK: should only be accessed by a registered program or owner. + pub authority: Signer<'info>, + pub registered_program_pda: Option>, + /// CHECK: when emitting event. + pub log_wrapper: UncheckedAccount<'info>, + /// CHECK: in from_bytes_mut. 
+ #[account(mut)] + pub merkle_tree: AccountInfo<'info>, +} + +impl<'info> GroupAccounts<'info> for BatchUpdateAddressTree<'info> { + fn get_authority(&self) -> &Signer<'info> { + &self.authority + } + fn get_registered_program_pda(&self) -> &Option> { + &self.registered_program_pda + } +} + +pub fn process_batch_update_address_tree<'a, 'b, 'c: 'info, 'info>( + ctx: &'a Context<'a, 'b, 'c, 'info, BatchUpdateAddressTree<'info>>, + instruction_data: InstructionDataBatchNullifyInputs, +) -> Result<()> { + let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::address_tree_from_account_info_mut( + &ctx.accounts.merkle_tree, + )?; + check_signer_is_registered_or_authority::< + BatchUpdateAddressTree, + ZeroCopyBatchedMerkleTreeAccount, + >(ctx, merkle_tree)?; + let event = merkle_tree + .update_address_queue(instruction_data, ctx.accounts.merkle_tree.key().to_bytes())?; + emit_indexer_event(event.try_to_vec()?, &ctx.accounts.log_wrapper) +} diff --git a/programs/account-compression/src/instructions/insert_into_queues.rs b/programs/account-compression/src/instructions/insert_into_queues.rs index a09300e6f6..48c4afc206 100644 --- a/programs/account-compression/src/instructions/insert_into_queues.rs +++ b/programs/account-compression/src/instructions/insert_into_queues.rs @@ -1,5 +1,5 @@ use crate::{ - batched_merkle_tree::ZeroCopyBatchedMerkleTreeAccount, + batched_merkle_tree::{BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount}, batched_queue::{BatchedQueueAccount, ZeroCopyBatchedQueueAccount}, check_queue_type, errors::AccountCompressionErrorCode, @@ -71,6 +71,14 @@ pub fn process_insert_into_queues<'a, 'b, 'c: 'info, 'info, MerkleTreeAccount: O indices[index], ctx.remaining_accounts, )?, + // Address queue is part of the address Merkle tree account. 
+ BatchedMerkleTreeAccount::DISCRIMINATOR => add_address_queue_bundle_v2( + &mut current_index, + queue_type, + &mut queue_map, + element, + ctx.remaining_accounts, + )?, _ => { msg!( "Invalid account discriminator {:?}", @@ -96,6 +104,7 @@ pub fn process_insert_into_queues<'a, 'b, 'c: 'info, 'info, MerkleTreeAccount: O QueueType::NullifierQueue => process_queue_bundle_v1(&ctx, queue_bundle), QueueType::AddressQueue => process_queue_bundle_v1(&ctx, queue_bundle), QueueType::Input => process_queue_bundle_v2(&ctx, queue_bundle, &tx_hash), + QueueType::Address => process_address_queue_bundle_v2(&ctx, queue_bundle), _ => { msg!("Queue type {:?} is not supported", queue_bundle.queue_type); return err!(AccountCompressionErrorCode::InvalidQueueType); @@ -106,7 +115,7 @@ pub fn process_insert_into_queues<'a, 'b, 'c: 'info, 'info, MerkleTreeAccount: O transfer_lamports_cpi( &ctx.accounts.fee_payer, // Queue account - &queue_bundle.accounts[1].to_account_info(), + &queue_bundle.accounts[0].to_account_info(), rollover_fee, )?; } @@ -119,7 +128,7 @@ fn process_queue_bundle_v1<'info>( ctx: &Context<'_, '_, '_, 'info, InsertIntoQueues<'info>>, queue_bundle: &QueueBundle<'_, '_>, ) -> Result { - let queue = AccountLoader::::try_from(queue_bundle.accounts[1])?; + let queue = AccountLoader::::try_from(queue_bundle.accounts[0])?; light_heap::bench_sbf_start!("acp_prep_insertion"); let rollover_fee = { let queue = queue.load()?; @@ -129,7 +138,7 @@ fn process_queue_bundle_v1<'info>( }; { let sequence_number = { - let merkle_tree = queue_bundle.accounts[0].try_borrow_data()?; + let merkle_tree = queue_bundle.accounts[1].try_borrow_data()?; let merkle_tree = state_merkle_tree_from_bytes_zero_copy(&merkle_tree)?; merkle_tree.sequence_number() }; @@ -155,9 +164,10 @@ fn process_queue_bundle_v2<'info>( queue_bundle: &QueueBundle<'_, '_>, tx_hash: &Option<[u8; 32]>, ) -> Result { - let account_data = &mut queue_bundle.accounts[1].try_borrow_mut_data()?; - let merkle_tree = &mut 
ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data)?; - let output_queue_account_data = &mut queue_bundle.accounts[0].try_borrow_mut_data()?; + let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::state_tree_from_account_info_mut( + queue_bundle.accounts[0], + )?; + let output_queue_account_data = &mut queue_bundle.accounts[1].try_borrow_mut_data()?; let output_queue = &mut ZeroCopyBatchedQueueAccount::from_bytes_mut(output_queue_account_data)?; check_signer_is_registered_or_authority::( ctx, @@ -185,6 +195,31 @@ fn process_queue_bundle_v2<'info>( Ok(rollover_fee) } +fn process_address_queue_bundle_v2<'info>( + ctx: &Context<'_, '_, '_, 'info, InsertIntoQueues<'info>>, + queue_bundle: &QueueBundle<'_, '_>, +) -> Result { + let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::address_tree_from_account_info_mut( + queue_bundle.accounts[0], + )?; + check_signer_is_registered_or_authority::( + ctx, + merkle_tree, + )?; + let rollover_fee = merkle_tree + .get_account() + .metadata + .rollover_metadata + .rollover_fee + * queue_bundle.elements.len() as u64; + for element in queue_bundle.elements.iter() { + light_heap::bench_sbf_start!("acp_insert_nf_into_queue_v2"); + merkle_tree.insert_address_into_current_batch(element)?; + light_heap::bench_sbf_end!("acp_insert_nf_into_queue_v2"); + } + Ok(rollover_fee) +} + fn add_queue_bundle_v1<'a, 'info>( remaining_accounts_index: &mut usize, queue_type: QueueType, @@ -212,7 +247,7 @@ fn add_queue_bundle_v1<'a, 'info>( } queue_map .entry(queue.key()) - .or_insert_with(|| QueueBundle::new(queue_type, vec![merkle_tree, queue])) + .or_insert_with(|| QueueBundle::new(queue_type, vec![queue, merkle_tree])) .elements .push(element); *remaining_accounts_index += 2; @@ -227,7 +262,6 @@ fn add_queue_bundle_v2<'a, 'info>( index: u32, remaining_accounts: &'info [AccountInfo<'info>], ) -> Result<()> { - // TODO: add address support if queue_type != QueueType::NullifierQueue { msg!("Queue type Address is not supported for 
BatchedMerkleTreeAccount"); return err!(AccountCompressionErrorCode::InvalidQueueType); } @@ -253,7 +287,7 @@ } queue_map .entry(merkle_tree.key()) - .or_insert_with(|| QueueBundle::new(QueueType::Input, vec![output_queue, merkle_tree])) + .or_insert_with(|| QueueBundle::new(QueueType::Input, vec![merkle_tree, output_queue])) .elements .push(element); queue_map @@ -263,3 +297,45 @@ } Ok(()) } + +fn add_address_queue_bundle_v2<'a, 'info>( + remaining_accounts_index: &mut usize, + queue_type: QueueType, + queue_map: &mut std::collections::HashMap>, + address: &'a [u8; 32], + remaining_accounts: &'info [AccountInfo<'info>], +) -> Result<()> { + if queue_type != QueueType::AddressQueue { + msg!("Queue type Address is not supported for BatchedMerkleTreeAccount"); + return err!(AccountCompressionErrorCode::InvalidQueueType); + } + let merkle_tree = remaining_accounts.get(*remaining_accounts_index).unwrap(); + + // TODO: Reconsider whether we can avoid sending the Merkle tree account + // twice. Right now we do it for consistency with usage by other
+ if merkle_tree.key() + != remaining_accounts + .get(*remaining_accounts_index + 1) + .unwrap() + .key() + { + msg!( + "Merkle tree accounts {:?} inconsistent {:?}", + merkle_tree.key(), + remaining_accounts + .get(*remaining_accounts_index + 1) + .unwrap() + .key() + ); + return err!(AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated); + } + queue_map + .entry(merkle_tree.key()) + .or_insert_with(|| QueueBundle::new(QueueType::Address, vec![merkle_tree])) + .elements + .push(address); + *remaining_accounts_index += 2; + + Ok(()) +} diff --git a/programs/account-compression/src/instructions/intialize_batch_address_merkle_tree.rs b/programs/account-compression/src/instructions/intialize_batch_address_merkle_tree.rs new file mode 100644 index 0000000000..528d96d970 --- /dev/null +++ b/programs/account-compression/src/instructions/intialize_batch_address_merkle_tree.rs @@ -0,0 +1,394 @@ +use anchor_lang::prelude::*; +use light_utils::fee::compute_rollover_fee; + +use crate::{ + batched_merkle_tree::{ + get_merkle_tree_account_size, BatchedMerkleTreeAccount, TreeType, + ZeroCopyBatchedMerkleTreeAccount, + }, + initialize_address_queue::check_rollover_fee_sufficient, + match_circuit_size, + utils::{ + check_account::check_account_balance_is_rent_exempt, + check_signer_is_registered_or_authority::{ + check_signer_is_registered_or_authority, GroupAccounts, + }, + constants::{ + DEFAULT_BATCH_SIZE, DEFAULT_ZKP_BATCH_SIZE, TEST_DEFAULT_BATCH_SIZE, + TEST_DEFAULT_ZKP_BATCH_SIZE, + }, + }, + MerkleTreeMetadata, RegisteredProgram, +}; + +#[derive(Accounts)] +pub struct InitializeBatchAddressMerkleTree<'info> { + #[account(mut)] + pub authority: Signer<'info>, + #[account(zero)] + pub merkle_tree: AccountLoader<'info, BatchedMerkleTreeAccount>, + pub registered_program_pda: Option>, +} + +impl<'info> GroupAccounts<'info> for InitializeBatchAddressMerkleTree<'info> { + fn get_authority(&self) -> &Signer<'info> { + &self.authority + } + fn 
get_registered_program_pda(&self) -> &Option> { + &self.registered_program_pda + } +} + +#[derive(Debug, PartialEq, Clone, Copy, AnchorDeserialize, AnchorSerialize)] +pub struct InitAddressTreeAccountsInstructionData { + pub index: u64, + pub program_owner: Option, + pub forester: Option, + pub input_queue_batch_size: u64, + pub input_queue_zkp_batch_size: u64, + pub bloom_filter_num_iters: u64, + pub bloom_filter_capacity: u64, + pub root_history_capacity: u32, + pub network_fee: Option, + pub rollover_threshold: Option, + pub close_threshold: Option, + pub input_queue_num_batches: u64, + pub height: u32, +} + +impl InitAddressTreeAccountsInstructionData { + pub fn test_default() -> Self { + Self { + index: 0, + program_owner: None, + forester: None, + bloom_filter_num_iters: 3, + input_queue_batch_size: TEST_DEFAULT_BATCH_SIZE, + input_queue_zkp_batch_size: TEST_DEFAULT_ZKP_BATCH_SIZE, + input_queue_num_batches: 2, + height: 26, + root_history_capacity: 20, + bloom_filter_capacity: 20_000 * 8, + network_fee: Some(5000), + rollover_threshold: Some(95), + close_threshold: None, + } + } + + pub fn e2e_test_default() -> Self { + Self { + index: 0, + program_owner: None, + forester: None, + bloom_filter_num_iters: 3, + input_queue_batch_size: 500, + input_queue_zkp_batch_size: TEST_DEFAULT_ZKP_BATCH_SIZE, + input_queue_num_batches: 2, + height: 26, + root_history_capacity: 20, + bloom_filter_capacity: 20_000 * 8, + network_fee: Some(5000), + rollover_threshold: Some(95), + close_threshold: None, + } + } +} + +impl Default for InitAddressTreeAccountsInstructionData { + fn default() -> Self { + Self { + index: 0, + program_owner: None, + forester: None, + bloom_filter_num_iters: 3, + input_queue_batch_size: DEFAULT_BATCH_SIZE, + input_queue_zkp_batch_size: DEFAULT_ZKP_BATCH_SIZE, + input_queue_num_batches: 2, + height: 26, + root_history_capacity: (DEFAULT_BATCH_SIZE / DEFAULT_ZKP_BATCH_SIZE * 2) as u32, + bloom_filter_capacity: (DEFAULT_BATCH_SIZE + 1) * 8, + 
network_fee: Some(5000), + rollover_threshold: Some(95), + close_threshold: None, + } + } +} + +pub fn process_initialize_batched_address_merkle_tree<'info>( + ctx: Context<'_, '_, '_, 'info, InitializeBatchAddressMerkleTree<'info>>, + params: InitAddressTreeAccountsInstructionData, +) -> Result<()> { + #[cfg(feature = "test")] + validate_batched_address_tree_params(params); + #[cfg(not(feature = "test"))] + { + if params != InitAddressTreeAccountsInstructionData::default() { + return err!(AccountCompressionErrorCode::UnsupportedParameters); + } + } + + let owner = match ctx.accounts.registered_program_pda.as_ref() { + Some(registered_program_pda) => { + check_signer_is_registered_or_authority::< + InitializeBatchAddressMerkleTree, + RegisteredProgram, + >(&ctx, registered_program_pda)?; + registered_program_pda.group_authority_pda + } + None => ctx.accounts.authority.key(), + }; + let mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + + let merkle_tree_rent = check_account_balance_is_rent_exempt( + &ctx.accounts.merkle_tree.to_account_info(), + mt_account_size, + )?; + + let mt_account_info = ctx.accounts.merkle_tree.to_account_info(); + let mt_data = &mut mt_account_info.try_borrow_mut_data()?; + + init_batched_address_merkle_tree_account(owner, params, mt_data, merkle_tree_rent)?; + + Ok(()) +} + +pub fn init_batched_address_merkle_tree_account( + owner: Pubkey, + params: InitAddressTreeAccountsInstructionData, + mt_account_data: &mut [u8], + merkle_tree_rent: u64, +) -> Result<()> { + let num_batches_input_queue = params.input_queue_num_batches; + let height = params.height; + + let rollover_fee = match params.rollover_threshold { + Some(rollover_threshold) => { + let rent = merkle_tree_rent; + let rollover_fee = compute_rollover_fee(rollover_threshold, height, rent) + 
.map_err(ProgramError::from)?; + check_rollover_fee_sufficient(rollover_fee, 0, rent, rollover_threshold, height)?; + rollover_fee + } + None => 0, + }; + + let metadata = MerkleTreeMetadata { + next_merkle_tree: Pubkey::default(), + access_metadata: crate::AccessMetadata::new(owner, params.program_owner, params.forester), + rollover_metadata: crate::RolloverMetadata::new( + params.index, + rollover_fee, + params.rollover_threshold, + params.network_fee.unwrap_or_default(), + params.close_threshold, + None, + ), + associated_queue: Pubkey::default(), + }; + ZeroCopyBatchedMerkleTreeAccount::init( + metadata, + params.root_history_capacity, + num_batches_input_queue, + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + height, + mt_account_data, + params.bloom_filter_num_iters, + params.bloom_filter_capacity, + TreeType::BatchedAddress, + )?; + Ok(()) +} + +pub fn validate_batched_address_tree_params(params: InitAddressTreeAccountsInstructionData) { + assert!(params.input_queue_batch_size > 0); + assert_eq!( + params.input_queue_batch_size % params.input_queue_zkp_batch_size, + 0, + "Input queue batch size must divisible by input_queue_zkp_batch_size." + ); + assert!( + match_circuit_size(params.input_queue_zkp_batch_size), + "Zkp batch size not supported. Supported 1, 10, 100, 500, 1000" + ); + + assert!(params.bloom_filter_num_iters > 0); + assert!(params.bloom_filter_capacity > params.input_queue_batch_size * 8); + assert_eq!( + params.bloom_filter_capacity % 8, + 0, + "Bloom filter capacity must be divisible by 8." 
+ ); + assert!(params.bloom_filter_capacity > 0); + assert!(params.root_history_capacity > 0); + assert!(params.input_queue_batch_size > 0); + assert_eq!(params.input_queue_num_batches, 2); + assert_eq!(params.close_threshold, None); + assert_eq!(params.height, 26); +} + +#[cfg(test)] +pub mod address_tree_tests { + + use light_bounded_vec::{BoundedVecMetadata, CyclicBoundedVecMetadata}; + use rand::{rngs::StdRng, Rng}; + + use crate::{ + assert_address_mt_zero_copy_inited, + batch::Batch, + batched_merkle_tree::{get_merkle_tree_account_size, get_merkle_tree_account_size_default}, + }; + + use super::*; + + #[test] + fn test_account_init() { + let owner = Pubkey::new_unique(); + + let mt_account_size = get_merkle_tree_account_size_default(); + let mut mt_account_data = vec![0; mt_account_size]; + + let params = InitAddressTreeAccountsInstructionData::test_default(); + + let merkle_tree_rent = 1_000_000_000; + init_batched_address_merkle_tree_account( + owner, + params.clone(), + &mut mt_account_data, + merkle_tree_rent, + ) + .unwrap(); + + let ref_mt_account = BatchedMerkleTreeAccount::get_address_tree_default( + owner, + None, + None, + params.rollover_threshold, + 0, + params.network_fee.unwrap_or_default(), + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + params.bloom_filter_capacity, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + merkle_tree_rent, + ); + assert_address_mt_zero_copy_inited( + &mut mt_account_data, + ref_mt_account, + params.bloom_filter_num_iters, + ); + } + + #[test] + fn test_rnd_account_init() { + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + for _ in 0..10000 { + println!("next iter ------------------------------------"); + let owner = Pubkey::new_unique(); + + let program_owner = if rng.gen_bool(0.5) { + Some(Pubkey::new_unique()) + } else { + None + }; + let forester = if rng.gen_bool(0.5) { + Some(Pubkey::new_unique()) + } else { + None + }; + let 
input_queue_zkp_batch_size = rng.gen_range(1..1000); + + let params = InitAddressTreeAccountsInstructionData { + index: rng.gen_range(0..1000), + program_owner, + forester, + bloom_filter_num_iters: rng.gen_range(0..4), + input_queue_batch_size: rng.gen_range(1..1000) * input_queue_zkp_batch_size, + input_queue_zkp_batch_size, + // 8 bits per byte, divisible by 8 for aligned memory + bloom_filter_capacity: rng.gen_range(0..100) * 8 * 8, + network_fee: Some(rng.gen_range(0..1000)), + rollover_threshold: Some(rng.gen_range(0..100)), + close_threshold: None, + root_history_capacity: rng.gen_range(1..1000), + input_queue_num_batches: rng.gen_range(1..4), + height: rng.gen_range(1..32), + }; + + use std::mem::size_of; + + let mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + { + let num_zkp_batches = + params.input_queue_batch_size / params.input_queue_zkp_batch_size; + let num_batches = params.input_queue_num_batches as usize; + let batch_size = size_of::() * num_batches + size_of::(); + let bloom_filter_size = (params.bloom_filter_capacity as usize / 8 + + size_of::()) + * num_batches; + let hash_chain_store_size = + (num_zkp_batches as usize * 32 + size_of::()) * num_batches; + let root_history_size = params.root_history_capacity as usize * 32 + + size_of::(); + // Output queue + let ref_account_size = + // metadata + BatchedMerkleTreeAccount::LEN + + root_history_size + + batch_size + + bloom_filter_size + // 2 hash chain stores + + hash_chain_store_size; + assert_eq!(mt_account_size, ref_account_size); + } + let mut mt_account_data = vec![0; mt_account_size]; + + let merkle_tree_rent = rng.gen_range(0..10000000); + + init_batched_address_merkle_tree_account( + owner, + params.clone(), + &mut mt_account_data, + merkle_tree_rent, + ) + .unwrap(); + let ref_mt_account = 
BatchedMerkleTreeAccount::get_address_tree_default( + owner, + program_owner, + forester, + params.rollover_threshold, + params.index, + params.network_fee.unwrap_or_default(), + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + params.bloom_filter_capacity, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + merkle_tree_rent, + ); + assert_address_mt_zero_copy_inited( + &mut mt_account_data, + ref_mt_account, + params.bloom_filter_num_iters, + ); + } + } +} diff --git a/programs/account-compression/src/instructions/intialize_batched_state_merkle_tree.rs b/programs/account-compression/src/instructions/intialize_batched_state_merkle_tree.rs index fd44e2af34..3084562403 100644 --- a/programs/account-compression/src/instructions/intialize_batched_state_merkle_tree.rs +++ b/programs/account-compression/src/instructions/intialize_batched_state_merkle_tree.rs @@ -4,7 +4,8 @@ use light_utils::fee::compute_rollover_fee; use crate::{ batched_merkle_tree::{ - get_merkle_tree_account_size, BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount, + get_merkle_tree_account_size, BatchedMerkleTreeAccount, TreeType, + ZeroCopyBatchedMerkleTreeAccount, }, batched_queue::{ assert_queue_inited, get_output_queue_account_size, BatchedQueue, BatchedQueueAccount, @@ -18,8 +19,8 @@ use crate::{ check_signer_is_registered_or_authority, GroupAccounts, }, constants::{ - DEFAULT_BATCH_SIZE, DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, DEFAULT_ZKP_BATCH_SIZE, - TEST_DEFAULT_BATCH_SIZE, TEST_DEFAULT_ZKP_BATCH_SIZE, + ADDRESS_TREE_INIT_ROOT_26, DEFAULT_BATCH_SIZE, DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, + DEFAULT_ZKP_BATCH_SIZE, TEST_DEFAULT_BATCH_SIZE, TEST_DEFAULT_ZKP_BATCH_SIZE, }, }, AccessMetadata, MerkleTreeMetadata, QueueMetadata, QueueType, RegisteredProgram, @@ -316,6 +317,7 @@ pub fn init_batched_state_merkle_tree_accounts( mt_account_data, params.bloom_filter_num_iters, params.bloom_filter_capacity, + TreeType::BatchedState, )?; Ok(()) } @@ 
-362,18 +364,48 @@ pub fn match_circuit_size(size: u64) -> bool { matches!(size, 10 | 100 | 500 | 1000) } -pub fn assert_mt_zero_copy_inited( +pub fn assert_state_mt_zero_copy_inited( account_data: &mut [u8], ref_account: BatchedMerkleTreeAccount, num_iters: u64, ) { - let mut zero_copy_account = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data) - .expect("from_bytes_mut failed"); + let zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut(account_data) + .expect("from_bytes_mut failed"); + _assert_mt_zero_copy_inited( + zero_copy_account, + ref_account, + num_iters, + TreeType::BatchedState as u64, + ); +} + +pub fn assert_address_mt_zero_copy_inited( + account_data: &mut [u8], + ref_account: BatchedMerkleTreeAccount, + num_iters: u64, +) { + let zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut(account_data) + .expect("from_bytes_mut failed"); + _assert_mt_zero_copy_inited( + zero_copy_account, + ref_account, + num_iters, + TreeType::Address as u64, + ); +} + +fn _assert_mt_zero_copy_inited( + mut zero_copy_account: ZeroCopyBatchedMerkleTreeAccount, + ref_account: BatchedMerkleTreeAccount, + num_iters: u64, + tree_type: u64, +) { let queue = zero_copy_account.get_account().queue; let ref_queue = ref_account.queue; - let queue_type = QueueType::Input as u64; let num_batches = ref_queue.num_batches as usize; - + let mut next_index = zero_copy_account.get_account().next_index; assert_eq!( *zero_copy_account.get_account(), ref_account, @@ -385,16 +417,35 @@ pub fn assert_mt_zero_copy_inited( ref_account.root_history_capacity as usize, "root_history_capacity mismatch" ); - assert_eq!( - *zero_copy_account.root_history.get(0).unwrap(), - light_hasher::Poseidon::zero_bytes()[ref_account.height as usize], - "root_history not initialized" - ); + if tree_type == TreeType::BatchedState as u64 { + assert_eq!( + *zero_copy_account.root_history.get(0).unwrap(), + 
light_hasher::Poseidon::zero_bytes()[ref_account.height as usize], + "root_history not initialized" + ); + } + if tree_type == TreeType::BatchedAddress as u64 { + assert_eq!( + *zero_copy_account.root_history.get(0).unwrap(), + ADDRESS_TREE_INIT_ROOT_26, + "root_history not initialized" + ); + } assert_eq!( zero_copy_account.hashchain_store[0].metadata().capacity(), ref_account.queue.get_num_zkp_batches() as usize, "hashchain_store mismatch" ); + + if tree_type == TreeType::BatchedAddress as u64 { + next_index = 2; + } + + let queue_type = if tree_type == TreeType::BatchedState as u64 { + QueueType::Input as u64 + } else { + QueueType::Address as u64 + }; assert_queue_inited( queue, ref_queue, @@ -404,6 +455,7 @@ pub fn assert_mt_zero_copy_inited( &mut zero_copy_account.batches, num_batches, num_iters, + next_index, ); } @@ -588,7 +640,7 @@ pub mod tests { params.height, params.input_queue_num_batches, ); - assert_mt_zero_copy_inited( + assert_state_mt_zero_copy_inited( &mut mt_account_data, ref_mt_account, params.bloom_filter_num_iters, @@ -753,7 +805,7 @@ pub mod tests { params.height, params.input_queue_num_batches, ); - assert_mt_zero_copy_inited( + assert_state_mt_zero_copy_inited( &mut mt_account_data, ref_mt_account, params.bloom_filter_num_iters, diff --git a/programs/account-compression/src/instructions/mod.rs b/programs/account-compression/src/instructions/mod.rs index f195aeada8..9d327c6489 100644 --- a/programs/account-compression/src/instructions/mod.rs +++ b/programs/account-compression/src/instructions/mod.rs @@ -45,3 +45,12 @@ pub use batch_append::*; pub mod rollover_batch_state_merkle_tree; pub use rollover_batch_state_merkle_tree::*; + +pub mod intialize_batch_address_merkle_tree; +pub use intialize_batch_address_merkle_tree::*; + +pub mod batch_update_address_tree; +pub use batch_update_address_tree::*; + +pub mod rollover_batch_address_merkle_tree; +pub use rollover_batch_address_merkle_tree::*; diff --git 
a/programs/account-compression/src/instructions/rollover_batch_address_merkle_tree.rs b/programs/account-compression/src/instructions/rollover_batch_address_merkle_tree.rs new file mode 100644 index 0000000000..8c82601f61 --- /dev/null +++ b/programs/account-compression/src/instructions/rollover_batch_address_merkle_tree.rs @@ -0,0 +1,484 @@ +use crate::{ + batched_merkle_tree::{BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount}, + init_batched_address_merkle_tree_account, + utils::{ + check_account::check_account_balance_is_rent_exempt, + check_signer_is_registered_or_authority::{ + check_signer_is_registered_or_authority, GroupAccounts, + }, + if_equals_none, + transfer_lamports::transfer_lamports, + }, + InitAddressTreeAccountsInstructionData, RegisteredProgram, +}; +use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey}; + +use super::assert_address_mt_zero_copy_inited; + +#[derive(Accounts)] +pub struct RolloverBatchAddressMerkleTree<'info> { + #[account(mut)] + /// Signer used to receive rollover accounts rent exemption reimbursement. + pub fee_payer: Signer<'info>, + pub authority: Signer<'info>, + pub registered_program_pda: Option>, + /// CHECK: in account compression program. + #[account(zero)] + pub new_address_merkle_tree: AccountInfo<'info>, + /// CHECK: checked in manual deserialization. + #[account(mut)] + pub old_address_merkle_tree: AccountInfo<'info>, +} + +impl<'info> GroupAccounts<'info> for RolloverBatchAddressMerkleTree<'info> { + fn get_authority(&self) -> &Signer<'info> { + &self.authority + } + fn get_registered_program_pda(&self) -> &Option> { + &self.registered_program_pda + } +} + +/// Checks: +/// 1. Merkle tree is ready to be rolled over +/// 2. Merkle tree is not already rolled over +/// 3. Rollover threshold is configured; if not, the tree cannot be rolled over +/// +/// Actions: +/// 1. mark Merkle tree as rolled over in this slot +/// 2.
initialize new Merkle tree and nullifier queue with the same parameters +pub fn process_rollover_batch_address_merkle_tree<'a, 'b, 'c: 'info, 'info>( + ctx: Context<'a, 'b, 'c, 'info, RolloverBatchAddressMerkleTree<'info>>, + network_fee: Option, +) -> Result<()> { + let old_merkle_tree_account = + &mut ZeroCopyBatchedMerkleTreeAccount::address_tree_from_account_info_mut( + &ctx.accounts.old_address_merkle_tree, + )?; + check_signer_is_registered_or_authority::< + RolloverBatchAddressMerkleTree, + ZeroCopyBatchedMerkleTreeAccount, + >(&ctx, old_merkle_tree_account)?; + + let merkle_tree_rent = check_account_balance_is_rent_exempt( + &ctx.accounts.new_address_merkle_tree.to_account_info(), + ctx.accounts + .old_address_merkle_tree + .to_account_info() + .data_len(), + )?; + let new_mt_data = &mut ctx.accounts.new_address_merkle_tree.try_borrow_mut_data()?; + rollover_batch_address_tree( + old_merkle_tree_account, + new_mt_data, + merkle_tree_rent, + ctx.accounts.new_address_merkle_tree.key(), + network_fee, + )?; + + transfer_lamports( + &ctx.accounts.old_address_merkle_tree.to_account_info(), + &ctx.accounts.fee_payer.to_account_info(), + merkle_tree_rent, + )?; + + Ok(()) +} + +pub fn rollover_batch_address_tree( + old_merkle_tree: &mut ZeroCopyBatchedMerkleTreeAccount, + new_mt_data: &mut [u8], + new_mt_rent: u64, + new_mt_pubkey: Pubkey, + network_fee: Option, +) -> Result<()> { + old_merkle_tree + .get_account_mut() + .metadata + .rollover(Pubkey::default(), new_mt_pubkey)?; + let old_merkle_tree_account = old_merkle_tree.get_account(); + + if old_merkle_tree_account.next_index + < ((1 << old_merkle_tree_account.height) + * old_merkle_tree_account + .metadata + .rollover_metadata + .rollover_threshold + / 100) + { + return err!(crate::errors::AccountCompressionErrorCode::NotReadyForRollover); + } + if old_merkle_tree_account + .metadata + .rollover_metadata + .network_fee + == 0 + && network_fee.is_some() + { + msg!("Network fee must be 0 for manually forested 
trees."); + return err!(crate::errors::AccountCompressionErrorCode::InvalidNetworkFee); + } + + let params = InitAddressTreeAccountsInstructionData { + index: old_merkle_tree_account.metadata.rollover_metadata.index, + program_owner: if_equals_none( + old_merkle_tree_account + .metadata + .access_metadata + .program_owner, + Pubkey::default(), + ), + forester: if_equals_none( + old_merkle_tree_account.metadata.access_metadata.forester, + Pubkey::default(), + ), + height: old_merkle_tree_account.height, + input_queue_batch_size: old_merkle_tree_account.queue.batch_size, + input_queue_zkp_batch_size: old_merkle_tree_account.queue.zkp_batch_size, + bloom_filter_capacity: old_merkle_tree_account.queue.bloom_filter_capacity, + bloom_filter_num_iters: old_merkle_tree.batches[0].num_iters, + root_history_capacity: old_merkle_tree_account.root_history_capacity, + network_fee, + rollover_threshold: if_equals_none( + old_merkle_tree_account + .metadata + .rollover_metadata + .rollover_threshold, + u64::MAX, + ), + close_threshold: if_equals_none( + old_merkle_tree_account + .metadata + .rollover_metadata + .close_threshold, + u64::MAX, + ), + input_queue_num_batches: old_merkle_tree_account.queue.num_batches, + }; + + init_batched_address_merkle_tree_account( + old_merkle_tree_account.metadata.access_metadata.owner, + params, + new_mt_data, + new_mt_rent, + ) +} + +#[cfg(test)] +mod address_tree_rollover_tests { + use light_bounded_vec::{BoundedVecMetadata, CyclicBoundedVecMetadata}; + use rand::thread_rng; + use solana_sdk::pubkey::Pubkey; + + use crate::{ + assert_address_mt_zero_copy_inited, + batch::Batch, + batched_merkle_tree::{ + get_merkle_tree_account_size, get_merkle_tree_account_size_default, + BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount, + }, + init_batched_address_merkle_tree_account, InitAddressTreeAccountsInstructionData, + }; + + use super::{assert_address_mt_roll_over, rollover_batch_address_tree}; + + /// Test rollover of address tree + /// 
1. failing: not ready for rollover + /// 2. failing: already rolled over + /// 3. functional: rollover address tree + /// 4. failing: rollover threshold not set + #[test] + fn test_rollover() { + let owner = Pubkey::new_unique(); + + let mt_account_size = get_merkle_tree_account_size_default(); + let mut mt_account_data = vec![0; mt_account_size]; + + let params = InitAddressTreeAccountsInstructionData::test_default(); + let merkle_tree_rent = 1_000_000_000; + // create first merkle tree + + init_batched_address_merkle_tree_account( + owner, + params.clone(), + &mut mt_account_data, + merkle_tree_rent, + ) + .unwrap(); + + let ref_mt_account = BatchedMerkleTreeAccount::get_address_tree_default( + owner, + None, + None, + params.rollover_threshold, + 0, + params.network_fee.unwrap_or_default(), + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + params.bloom_filter_capacity, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + merkle_tree_rent, + ); + assert_address_mt_zero_copy_inited( + &mut mt_account_data, + ref_mt_account, + params.bloom_filter_num_iters, + ); + + let mut new_mt_account_data = vec![0; mt_account_size]; + let new_mt_pubkey = Pubkey::new_unique(); + + // 1. Failing: not ready for rollover + { + let mut mt_account_data = mt_account_data.clone(); + let result = rollover_batch_address_tree( + &mut ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(), + &mut new_mt_account_data, + merkle_tree_rent, + new_mt_pubkey, + params.network_fee, + ); + assert_eq!( + result, + Err(crate::errors::AccountCompressionErrorCode::NotReadyForRollover.into()) + ); + } + // 2. 
Failing rollover threshold not set + { + let mut mt_account_data = mt_account_data.clone(); + let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + merkle_tree + .get_account_mut() + .metadata + .rollover_metadata + .rollover_threshold = u64::MAX; + let result = rollover_batch_address_tree( + merkle_tree, + &mut new_mt_account_data, + merkle_tree_rent, + new_mt_pubkey, + params.network_fee, + ); + assert_eq!( + result, + Err(crate::errors::AccountCompressionErrorCode::RolloverNotConfigured.into()) + ); + } + // 3. Functional: rollover address tree + { + let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + merkle_tree.get_account_mut().next_index = 1 << merkle_tree.get_account().height; + + rollover_batch_address_tree( + merkle_tree, + &mut new_mt_account_data, + merkle_tree_rent, + new_mt_pubkey, + params.network_fee, + ) + .unwrap(); + let new_ref_mt_account = ref_mt_account.clone(); + + let mut ref_rolledover_mt = ref_mt_account.clone(); + ref_rolledover_mt.next_index = 1 << ref_rolledover_mt.height; + assert_address_mt_roll_over( + mt_account_data.to_vec(), + ref_rolledover_mt, + new_mt_account_data.to_vec(), + new_ref_mt_account, + new_mt_pubkey, + params.bloom_filter_num_iters, + ); + } + // 4. 
Failing: already rolled over + { + let mut mt_account_data = mt_account_data.clone(); + let mut new_mt_account_data = vec![0; mt_account_size]; + + let result = rollover_batch_address_tree( + &mut ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(), + &mut new_mt_account_data, + merkle_tree_rent, + new_mt_pubkey, + params.network_fee, + ); + assert_eq!( + result, + Err(crate::errors::AccountCompressionErrorCode::MerkleTreeAlreadyRolledOver.into()) + ); + } + } + + #[test] + fn test_rnd_rollover() { + use rand::Rng; + let mut rng = thread_rng(); + for _ in 0..10000 { + let owner = Pubkey::new_unique(); + + let program_owner = if rng.gen_bool(0.5) { + Some(Pubkey::new_unique()) + } else { + None + }; + let forester = if rng.gen_bool(0.5) { + Some(Pubkey::new_unique()) + } else { + None + }; + let input_queue_zkp_batch_size = rng.gen_range(1..1000); + + let mut params = InitAddressTreeAccountsInstructionData { + index: rng.gen_range(0..1000), + program_owner, + forester, + bloom_filter_num_iters: rng.gen_range(0..4), + input_queue_batch_size: rng.gen_range(1..1000) * input_queue_zkp_batch_size, + input_queue_zkp_batch_size, + // 8 bits per byte, divisible by 8 for aligned memory + bloom_filter_capacity: rng.gen_range(0..100) * 8 * 8, + network_fee: Some(rng.gen_range(1..1000)), + rollover_threshold: Some(rng.gen_range(0..100)), + close_threshold: None, + root_history_capacity: rng.gen_range(1..1000), + input_queue_num_batches: rng.gen_range(1..4), + height: rng.gen_range(1..32), + }; + if forester.is_some() { + params.network_fee = None; + } + + use std::mem::size_of; + + let mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + { + let num_zkp_batches = + params.input_queue_batch_size / params.input_queue_zkp_batch_size; + let 
num_batches = params.input_queue_num_batches as usize; + let batch_size = size_of::() * num_batches + size_of::(); + let bloom_filter_size = (params.bloom_filter_capacity as usize / 8 + + size_of::()) + * num_batches; + let hash_chain_store_size = + (num_zkp_batches as usize * 32 + size_of::()) * num_batches; + let root_history_size = params.root_history_capacity as usize * 32 + + size_of::(); + // Output queue + let ref_account_size = + // metadata + BatchedMerkleTreeAccount::LEN + + root_history_size + + batch_size + + bloom_filter_size + // 2 hash chain stores + + hash_chain_store_size; + assert_eq!(mt_account_size, ref_account_size); + } + let mut mt_account_data = vec![0; mt_account_size]; + + let merkle_tree_rent = rng.gen_range(0..10000000); + + init_batched_address_merkle_tree_account( + owner, + params.clone(), + &mut mt_account_data, + merkle_tree_rent, + ) + .unwrap(); + let ref_mt_account = BatchedMerkleTreeAccount::get_address_tree_default( + owner, + program_owner, + forester, + params.rollover_threshold, + params.index, + params.network_fee.unwrap_or_default(), + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + params.bloom_filter_capacity, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + merkle_tree_rent, + ); + assert_address_mt_zero_copy_inited( + &mut mt_account_data, + ref_mt_account, + params.bloom_filter_num_iters, + ); + let mut new_mt_data = vec![0; mt_account_size]; + let new_mt_rent = merkle_tree_rent; + let network_fee = params.network_fee; + let new_mt_pubkey = Pubkey::new_unique(); + let mut zero_copy_old_mt = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut(&mut mt_account_data) + .unwrap(); + zero_copy_old_mt.get_account_mut().next_index = 1 << params.height; + rollover_batch_address_tree( + &mut zero_copy_old_mt, + &mut new_mt_data, + new_mt_rent, + new_mt_pubkey, + network_fee, + ) + .unwrap(); + let new_ref_mt_account = ref_mt_account.clone(); + let mut 
ref_rolled_over_account = ref_mt_account.clone(); + ref_rolled_over_account.next_index = 1 << params.height; + + assert_address_mt_roll_over( + mt_account_data, + ref_rolled_over_account, + new_mt_data, + new_ref_mt_account, + new_mt_pubkey, + params.bloom_filter_num_iters, + ); + } + } +} + +// TODO: assert that remainder of old_mt_account_data is not changed +pub fn assert_address_mt_roll_over( + mut old_mt_account_data: Vec, + mut old_ref_mt_account: BatchedMerkleTreeAccount, + mut new_mt_account_data: Vec, + new_ref_mt_account: BatchedMerkleTreeAccount, + new_mt_pubkey: Pubkey, + bloom_filter_num_iters: u64, +) { + old_ref_mt_account + .metadata + .rollover(Pubkey::default(), new_mt_pubkey) + .unwrap(); + let old_mt_account = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut(&mut old_mt_account_data) + .unwrap(); + assert_eq!(old_mt_account.get_account(), &old_ref_mt_account); + + assert_address_mt_zero_copy_inited( + &mut new_mt_account_data, + new_ref_mt_account, + bloom_filter_num_iters, + ); +} diff --git a/programs/account-compression/src/instructions/rollover_batch_state_merkle_tree.rs b/programs/account-compression/src/instructions/rollover_batch_state_merkle_tree.rs index 8b53a94293..800028c662 100644 --- a/programs/account-compression/src/instructions/rollover_batch_state_merkle_tree.rs +++ b/programs/account-compression/src/instructions/rollover_batch_state_merkle_tree.rs @@ -16,7 +16,7 @@ use crate::{ }; use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey}; -use super::assert_mt_zero_copy_inited; +use super::assert_state_mt_zero_copy_inited; #[derive(Accounts)] pub struct RolloverBatchStateMerkleTree<'info> { @@ -218,7 +218,7 @@ mod batch_state_tree_rollover_tests { use solana_sdk::pubkey::Pubkey; use crate::{ - assert_mt_zero_copy_inited, + assert_state_mt_zero_copy_inited, batched_merkle_tree::{ get_merkle_tree_account_size, get_merkle_tree_account_size_default, BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount, @@ 
-292,7 +292,7 @@ mod batch_state_tree_rollover_tests { params.height, params.input_queue_num_batches, ); - assert_mt_zero_copy_inited( + assert_state_mt_zero_copy_inited( &mut mt_account_data, ref_mt_account, params.bloom_filter_num_iters, @@ -831,7 +831,7 @@ mod batch_state_tree_rollover_tests { params.height, params.input_queue_num_batches, ); - assert_mt_zero_copy_inited( + assert_state_mt_zero_copy_inited( &mut mt_account_data, ref_mt_account, params.bloom_filter_num_iters, @@ -923,7 +923,8 @@ pub fn assert_state_mt_roll_over( .rolledover_slot = slot; assert_queue_zero_copy_inited(&mut new_queue_account_data, ref_queue_account, 0); - // assert_queue_zero_copy_inited(&mut queue_account_data, ref_rolledover_queue, 0); + println!("asserted queue roll over"); + let zero_copy_queue = ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut queue_account_data).unwrap(); assert_eq!( @@ -942,6 +943,7 @@ pub fn assert_state_mt_roll_over( ) } +// TODO: assert that the rest of the rolled over account didn't change pub fn assert_mt_roll_over( mut mt_account_data: Vec, ref_mt_account: BatchedMerkleTreeAccount, @@ -957,13 +959,11 @@ pub fn assert_mt_roll_over( .rollover(old_queue_pubkey, new_mt_pubkey) .unwrap(); ref_rolledover_mt.metadata.rollover_metadata.rolledover_slot = slot; + let zero_copy_mt = + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut(&mut mt_account_data).unwrap(); + assert_eq!(*zero_copy_mt.get_account(), ref_rolledover_mt); - assert_mt_zero_copy_inited( - &mut mt_account_data, - ref_rolledover_mt, - bloom_filter_num_iters, - ); - assert_mt_zero_copy_inited( + assert_state_mt_zero_copy_inited( &mut new_mt_account_data, ref_mt_account, bloom_filter_num_iters, diff --git a/programs/account-compression/src/lib.rs b/programs/account-compression/src/lib.rs index 23978159f3..39e33c8990 100644 --- a/programs/account-compression/src/lib.rs +++ b/programs/account-compression/src/lib.rs @@ -10,7 +10,7 @@ pub mod utils; pub use processor::*; pub mod sdk; use 
anchor_lang::prelude::*; -use batched_merkle_tree::InstructionDataBatchNullifyInputs; +use batched_merkle_tree::{InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs}; declare_id!("compr6CUsB5m2jS4Y3831ztGSTnDpnKJTKS95d64XVq"); @@ -25,7 +25,6 @@ solana_security_txt::security_txt! { #[program] pub mod account_compression { - use batched_merkle_tree::InstructionDataBatchAppendInputs; use errors::AccountCompressionErrorCode; use self::insert_into_queues::{process_insert_into_queues, InsertIntoQueues}; @@ -216,8 +215,7 @@ pub mod account_compression { ) -> Result<()> { let instruction_data = InstructionDataBatchNullifyInputs::try_from_slice(&data) .map_err(|_| AccountCompressionErrorCode::InputDeserializationFailed)?; - process_batch_nullify(&ctx, instruction_data)?; - Ok(()) + process_batch_nullify(&ctx, instruction_data) } pub fn batch_append<'a, 'b, 'c: 'info, 'info>( @@ -226,8 +224,30 @@ pub mod account_compression { ) -> Result<()> { let instruction_data = InstructionDataBatchAppendInputs::try_from_slice(&data) .map_err(|_| AccountCompressionErrorCode::InputDeserializationFailed)?; - process_batch_append_leaves(&ctx, instruction_data)?; - Ok(()) + process_batch_append_leaves(&ctx, instruction_data) + } + + pub fn batch_update_address_tree<'a, 'b, 'c: 'info, 'info>( + ctx: Context<'a, 'b, 'c, 'info, BatchUpdateAddressTree<'info>>, + data: Vec, + ) -> Result<()> { + let instruction_data = InstructionDataBatchNullifyInputs::try_from_slice(&data) + .map_err(|_| AccountCompressionErrorCode::InputDeserializationFailed)?; + process_batch_update_address_tree(&ctx, instruction_data) + } + + pub fn intialize_batched_address_merkle_tree<'info>( + ctx: Context<'_, '_, '_, 'info, InitializeBatchAddressMerkleTree<'info>>, + params: InitAddressTreeAccountsInstructionData, + ) -> Result<()> { + process_initialize_batched_address_merkle_tree(ctx, params) + } + + pub fn rollover_batch_address_merkle_tree<'a, 'b, 'c: 'info, 'info>( + ctx: Context<'a, 'b, 'c, 'info, 
RolloverBatchAddressMerkleTree<'info>>, + network_fee: Option, + ) -> Result<()> { + process_rollover_batch_address_merkle_tree(ctx, network_fee) } pub fn rollover_batch_state_merkle_tree<'a, 'b, 'c: 'info, 'info>( diff --git a/programs/account-compression/src/state/batch.rs b/programs/account-compression/src/state/batch.rs index faef58ddc2..9a20af9b8c 100644 --- a/programs/account-compression/src/state/batch.rs +++ b/programs/account-compression/src/state/batch.rs @@ -11,9 +11,8 @@ pub enum BatchState { CanBeFilled, /// Batch has been inserted into the tree. Inserted, - /// Batch is ready to be inserted into the tree. Possibly it is already - /// partially inserted into the tree. - ReadyToUpdateTree, + /// Batch is full, and insertion is in progress. + Full, } #[derive(Clone, Debug, PartialEq, Eq)] @@ -68,7 +67,7 @@ impl Batch { self.state } - /// fill -> ready -> inserted -> fill + /// fill -> full -> inserted -> fill pub fn advance_state_to_can_be_filled(&mut self) -> Result<()> { if self.state == BatchState::Inserted { self.state = BatchState::CanBeFilled; @@ -82,9 +81,9 @@ impl Batch { Ok(()) } - /// fill -> ready -> inserted -> fill + /// fill -> full -> inserted -> fill pub fn advance_state_to_inserted(&mut self) -> Result<()> { - if self.state == BatchState::ReadyToUpdateTree { + if self.state == BatchState::Full { self.state = BatchState::Inserted; } else { msg!( @@ -96,10 +95,10 @@ impl Batch { Ok(()) } - /// fill -> ready -> inserted -> fill - pub fn advance_state_to_ready_to_update_tree(&mut self) -> Result<()> { + /// fill -> full -> inserted -> fill + pub fn advance_state_to_full(&mut self) -> Result<()> { if self.state == BatchState::CanBeFilled { - self.state = BatchState::ReadyToUpdateTree; + self.state = BatchState::Full; } else { msg!( "Batch is in incorrect state {} expected ReadyToUpdateTree 2", @@ -110,6 +109,16 @@ impl Batch { Ok(()) } + pub fn get_first_ready_zkp_batch(&self) -> Result { + if self.state == BatchState::Inserted { + 
err!(AccountCompressionErrorCode::BatchAlreadyInserted) + } else if self.current_zkp_batch_index > self.num_inserted_zkps { + Ok(self.num_inserted_zkps) + } else { + err!(AccountCompressionErrorCode::BatchNotReady) + } + } + pub fn get_num_inserted(&self) -> u64 { self.num_inserted } @@ -122,6 +131,10 @@ impl Batch { self.num_inserted_zkps } + pub fn get_num_inserted_elements(&self) -> u64 { + self.num_inserted_zkps * self.zkp_batch_size + self.num_inserted + } + pub fn store_value( &mut self, value: &[u8; 32], @@ -153,6 +166,7 @@ impl Batch { store: &mut [u8], hashchain_store: &mut BoundedVec<[u8; 32]>, ) -> Result<()> { + println!("batch insert bloom filter value {:?}", bloom_filter_value); let mut bloom_filter = BloomFilter::new(self.num_iters as usize, self.bloom_filter_capacity, store) .map_err(ProgramError::from)?; @@ -182,7 +196,7 @@ impl Batch { } if self.get_num_zkp_batches() == self.current_zkp_batch_index { - self.advance_state_to_ready_to_update_tree()?; + self.advance_state_to_full()?; self.num_inserted = 0; } @@ -213,13 +227,17 @@ impl Batch { root_index: u32, root_history_length: u32, ) -> Result<()> { - if self.state != BatchState::ReadyToUpdateTree { - return err!(AccountCompressionErrorCode::BatchNotReady); - } + // Check that batch is ready. + self.get_first_ready_zkp_batch()?; + let num_zkp_batches = self.get_num_zkp_batches(); self.num_inserted_zkps += 1; - + msg!( + "Marking batch as inserted in the merkle tree. num_inserted_zkps: {}", + self.num_inserted_zkps + ); + msg!("num_zkp_batches: {}", num_zkp_batches); // Batch has been successfully inserted into the tree. 
if self.num_inserted_zkps == num_zkp_batches { self.current_zkp_batch_index = 0; @@ -274,7 +292,7 @@ mod tests { .mark_as_inserted_in_merkle_tree(sequence_number, root_index, root_history_length) .unwrap(); if i != batch.get_num_zkp_batches() - 1 { - assert_eq!(batch.get_state(), BatchState::ReadyToUpdateTree); + assert_eq!(batch.get_state(), BatchState::Full); assert_eq!(batch.get_num_inserted(), 0); assert_eq!(batch.get_current_zkp_batch_index(), 5); assert_eq!(batch.get_num_inserted_zkps(), i + 1); @@ -315,7 +333,7 @@ mod tests { ref_batch.current_zkp_batch_index += 1; } if ref_batch.current_zkp_batch_index == ref_batch.get_num_zkp_batches() { - ref_batch.state = BatchState::ReadyToUpdateTree; + ref_batch.state = BatchState::Full; ref_batch.num_inserted = 0; } assert_eq!(batch, ref_batch); @@ -326,7 +344,7 @@ mod tests { result.unwrap_err(), AccountCompressionErrorCode::BatchNotReady.into() ); - assert_eq!(batch.get_state(), BatchState::ReadyToUpdateTree); + assert_eq!(batch.get_state(), BatchState::Full); assert_eq!(batch.get_num_inserted(), 0); assert_eq!(batch.get_current_zkp_batch_index(), 5); assert_eq!(batch.get_num_zkp_batches(), 5); @@ -371,7 +389,7 @@ mod tests { ref_batch.current_zkp_batch_index += 1; } if i == batch.batch_size - 1 { - ref_batch.state = BatchState::ReadyToUpdateTree; + ref_batch.state = BatchState::Full; ref_batch.num_inserted = 0; } assert_eq!(batch, ref_batch); @@ -434,8 +452,8 @@ mod tests { assert_eq!(batch.get_num_inserted(), 0); assert_eq!(batch.get_current_zkp_batch_index(), 0); assert_eq!(batch.get_num_inserted_zkps(), 0); - batch.advance_state_to_ready_to_update_tree().unwrap(); - assert_eq!(batch.get_state(), BatchState::ReadyToUpdateTree); + batch.advance_state_to_full().unwrap(); + assert_eq!(batch.get_state(), BatchState::Full); batch.advance_state_to_inserted().unwrap(); assert_eq!(batch.get_state(), BatchState::Inserted); } @@ -447,7 +465,7 @@ mod tests { #[test] fn test_value_is_inserted_in_batch() { let mut batch = 
get_test_batch(); - batch.advance_state_to_ready_to_update_tree().unwrap(); + batch.advance_state_to_full().unwrap(); batch.advance_state_to_inserted().unwrap(); batch.start_index = 1; let lowest_eligible_value = batch.start_index; @@ -482,4 +500,45 @@ mod tests { false ); } + + /// 1. Failing: empty batch + /// 2. Functional: if zkp batch size is full else failing + /// 3. Failing: batch is completely inserted + #[test] + fn test_can_insert_batch() { + let mut batch = get_test_batch(); + assert_eq!( + batch.get_first_ready_zkp_batch(), + Err(AccountCompressionErrorCode::BatchNotReady.into()) + ); + let mut bounded_vec = BoundedVec::with_capacity(batch.batch_size as usize); + let mut value_store = BoundedVec::with_capacity(batch.batch_size as usize); + + for i in 0..batch.batch_size + 10 { + let mut value = [0u8; 32]; + value[24..].copy_from_slice(&i.to_be_bytes()); + if i < batch.batch_size { + batch + .store_and_hash_value(&value, &mut value_store, &mut bounded_vec) + .unwrap(); + } + if (i + 1) % batch.zkp_batch_size == 0 && i != 0 { + assert_eq!( + batch.get_first_ready_zkp_batch().unwrap(), + i / batch.zkp_batch_size + ); + batch.mark_as_inserted_in_merkle_tree(0, 0, 0).unwrap(); + } else if i >= batch.batch_size { + assert_eq!( + batch.get_first_ready_zkp_batch(), + Err(AccountCompressionErrorCode::BatchAlreadyInserted.into()) + ); + } else { + assert_eq!( + batch.get_first_ready_zkp_batch(), + Err(AccountCompressionErrorCode::BatchNotReady.into()) + ); + } + } + } } diff --git a/programs/account-compression/src/state/batched_merkle_tree.rs b/programs/account-compression/src/state/batched_merkle_tree.rs index 6b7efd8351..09699631fd 100644 --- a/programs/account-compression/src/state/batched_merkle_tree.rs +++ b/programs/account-compression/src/state/batched_merkle_tree.rs @@ -4,16 +4,21 @@ use crate::{ bytes_to_struct_checked, errors::AccountCompressionErrorCode, utils::{ - check_signer_is_registered_or_authority::GroupAccess, 
constants::TEST_DEFAULT_BATCH_SIZE, + check_signer_is_registered_or_authority::GroupAccess, + constants::{ADDRESS_TREE_INIT_ROOT_26, TEST_DEFAULT_BATCH_SIZE}, }, - InitStateTreeAccountsInstructionData, + InitAddressTreeAccountsInstructionData, InitStateTreeAccountsInstructionData, }; use aligned_sized::aligned_sized; use anchor_lang::prelude::*; use borsh::{BorshDeserialize, BorshSerialize}; use light_bounded_vec::{BoundedVec, CyclicBoundedVec, CyclicBoundedVecMetadata}; use light_hasher::{Hasher, Poseidon}; -use light_verifier::{verify_batch_append_with_proofs, verify_batch_update, CompressedProof}; +use light_utils::fee::compute_rollover_fee; +use light_verifier::{ + verify_batch_address_update, verify_batch_append_with_proofs, verify_batch_update, + CompressedProof, +}; use std::mem::{size_of, ManuallyDrop}; use super::{ @@ -117,6 +122,83 @@ impl BatchedMerkleTreeAccount { associated_queue: Pubkey, height: u32, num_batches: u64, + ) -> Self { + Self::get_tree_default( + TreeType::BatchedState, + owner, + program_owner, + forester, + rollover_threshold, + index, + network_fee, + batch_size, + zkp_batch_size, + bloom_filter_capacity, + root_history_capacity, + associated_queue, + height, + num_batches, + 0, + ) + } + pub fn get_address_tree_default( + owner: Pubkey, + program_owner: Option, + forester: Option, + rollover_threshold: Option, + index: u64, + network_fee: u64, + batch_size: u64, + zkp_batch_size: u64, + bloom_filter_capacity: u64, + root_history_capacity: u32, + height: u32, + num_batches: u64, + rent: u64, + ) -> Self { + let rollover_fee = match rollover_threshold { + Some(rollover_threshold) => { + compute_rollover_fee(rollover_threshold, height, rent).unwrap() + } + None => 0, + }; + let mut tree = Self::get_tree_default( + TreeType::BatchedAddress, + owner, + program_owner, + forester, + rollover_threshold, + index, + network_fee, + batch_size, + zkp_batch_size, + bloom_filter_capacity, + root_history_capacity, + Pubkey::default(), + height, + 
num_batches, + rollover_fee, + ); + // inited address tree contains two elements. + tree.next_index = 2; + tree + } + pub fn get_tree_default( + tree_type: TreeType, + owner: Pubkey, + program_owner: Option, + forester: Option, + rollover_threshold: Option, + index: u64, + network_fee: u64, + batch_size: u64, + zkp_batch_size: u64, + bloom_filter_capacity: u64, + root_history_capacity: u32, + associated_queue: Pubkey, + height: u32, + num_batches: u64, + rollover_fee: u64, ) -> Self { Self { metadata: MerkleTreeMetadata { @@ -124,7 +206,7 @@ impl BatchedMerkleTreeAccount { access_metadata: AccessMetadata::new(owner, program_owner, forester), rollover_metadata: RolloverMetadata::new( index, - 0, + rollover_fee, rollover_threshold, network_fee, None, @@ -133,7 +215,7 @@ impl BatchedMerkleTreeAccount { associated_queue, }, sequence_number: 0, - tree_type: TreeType::BatchedState as u64, + tree_type: tree_type as u64, next_index: 0, height, root_history_capacity, @@ -205,10 +287,12 @@ impl ZeroCopyBatchedMerkleTreeAccount { return err!(ErrorCode::AccountNotMutable); } let account_data = &mut account_info.try_borrow_mut_data()?; - let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data)?; + let merkle_tree = + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut(account_data)?; Ok(merkle_tree) } + // TODO: add failing test pub fn state_tree_from_bytes_mut( account_data: &mut [u8], ) -> Result { @@ -219,7 +303,34 @@ impl ZeroCopyBatchedMerkleTreeAccount { Ok(merkle_tree) } - pub fn from_bytes_mut(account_data: &mut [u8]) -> Result { + pub fn address_tree_from_account_info_mut( + account_info: &AccountInfo<'_>, + ) -> Result { + if *account_info.owner != crate::ID { + return err!(ErrorCode::AccountOwnedByWrongProgram); + } + if !account_info.is_writable { + return err!(ErrorCode::AccountNotMutable); + } + let account_data = &mut account_info.try_borrow_mut_data()?; + + let merkle_tree = + 
ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut(account_data)?; + Ok(merkle_tree) + } + + // TODO: add failing test + pub fn address_tree_from_bytes_mut( + account_data: &mut [u8], + ) -> Result { + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data)?; + if merkle_tree.get_account().tree_type != TreeType::BatchedAddress as u64 { + return err!(AccountCompressionErrorCode::InvalidTreeType); + } + Ok(merkle_tree) + } + + fn from_bytes_mut(account_data: &mut [u8]) -> Result { unsafe { let account = bytes_to_struct_checked::(account_data)?; if account_data.len() != (*account).size()? { @@ -256,13 +367,14 @@ impl ZeroCopyBatchedMerkleTreeAccount { account_data: &mut [u8], num_iters: u64, bloom_filter_capacity: u64, + tree_type: TreeType, ) -> Result { unsafe { let account = bytes_to_struct_checked::(account_data)?; (*account).metadata = metadata; (*account).root_history_capacity = root_history_capacity; (*account).height = height; - (*account).tree_type = TreeType::BatchedState as u64; + (*account).tree_type = tree_type as u64; (*account).queue.init( num_batches_input_queue, input_queue_batch_size, @@ -284,8 +396,13 @@ impl ZeroCopyBatchedMerkleTreeAccount { false, ) .map_err(ProgramError::from)?; - root_history.push(light_hasher::Poseidon::zero_bytes()[height as usize]); - + if tree_type == TreeType::BatchedState { + root_history.push(light_hasher::Poseidon::zero_bytes()[height as usize]); + } else if tree_type == TreeType::BatchedAddress { + // Initialized indexed Merkle tree root + root_history.push(ADDRESS_TREE_INIT_ROOT_26); + (*account).next_index = 2; + } let (batches, value_vecs, bloom_filter_stores, hashchain_store) = init_queue( &(*account).queue, QueueType::Input as u64, @@ -293,6 +410,7 @@ impl ZeroCopyBatchedMerkleTreeAccount { num_iters, bloom_filter_capacity, &mut start_offset, + (*account).next_index, )?; Ok(ZeroCopyBatchedMerkleTreeAccount { account, @@ -322,13 +440,8 @@ impl ZeroCopyBatchedMerkleTreeAccount { 
let batches = &mut queue_account.batches; let full_batch = batches.get_mut(batch_index as usize).unwrap(); - if full_batch.get_state() != BatchState::ReadyToUpdateTree { - msg!("Queue is in invalid state: {:?}", full_batch.get_state()); - return err!(AccountCompressionErrorCode::BatchAlreadyInserted); - } - let new_root = instruction_data.public_inputs.new_root; - let num_zkps = full_batch.get_num_inserted_zkps(); + let num_zkps = full_batch.get_first_ready_zkp_batch()?; let leaves_hashchain = queue_account .hashchain_store @@ -381,16 +494,28 @@ impl ZeroCopyBatchedMerkleTreeAccount { &mut self, instruction_data: InstructionDataBatchNullifyInputs, id: [u8; 32], + ) -> Result { + self._update_input_queue::<3>(instruction_data, id) + } + + pub fn update_address_queue( + &mut self, + instruction_data: InstructionDataBatchNullifyInputs, + id: [u8; 32], + ) -> Result { + self._update_input_queue::<4>(instruction_data, id) + } + + fn _update_input_queue( + &mut self, + instruction_data: InstructionDataBatchNullifyInputs, + id: [u8; 32], ) -> Result { let batch_index = self.get_account().queue.next_full_batch_index; let full_batch = self.batches.get(batch_index as usize).unwrap(); - if full_batch.get_state() != BatchState::ReadyToUpdateTree { - msg!("Queue is in invalid state: {:?}", full_batch.get_state()); - return err!(AccountCompressionErrorCode::BatchAlreadyInserted); - } - let num_zkps = full_batch.get_num_inserted_zkps(); + let num_zkps = full_batch.get_first_ready_zkp_batch()?; let leaves_hashchain = self .hashchain_store @@ -404,10 +529,18 @@ impl ZeroCopyBatchedMerkleTreeAccount { .unwrap(); let new_root = instruction_data.public_inputs.new_root; - let public_input_hash = create_hash_chain([*old_root, new_root, *leaves_hashchain])?; + let public_input_hash = if QUEUE_TYPE == QueueType::Input as u64 { + create_hash_chain([*old_root, new_root, *leaves_hashchain])? 
+ } else if QUEUE_TYPE == QueueType::Address as u64 { + let mut next_index_bytes = [0u8; 32]; + next_index_bytes[24..] + .copy_from_slice(self.get_account().next_index.to_be_bytes().as_slice()); + create_hash_chain([*old_root, new_root, *leaves_hashchain, next_index_bytes])? + } else { + return err!(AccountCompressionErrorCode::InvalidQueueType); + }; let circuit_batch_size = self.get_account().queue.zkp_batch_size; - let sequence_number = self.get_account().sequence_number; - self.update::<3>( + self.update::( circuit_batch_size as usize, instruction_data.compressed_proof, public_input_hash, @@ -415,19 +548,24 @@ impl ZeroCopyBatchedMerkleTreeAccount { self.root_history.push(new_root); let root_history_capacity = self.get_account().root_history_capacity; + let sequence_number = self.get_account().sequence_number; let full_batch = self.batches.get_mut(batch_index as usize).unwrap(); full_batch.mark_as_inserted_in_merkle_tree( sequence_number, self.root_history.last_index() as u32, root_history_capacity, )?; - // TODO(optimization): search for bloom_filter that can be cleared - if full_batch.get_state() == BatchState::Inserted { let account = self.get_account_mut(); account.queue.next_full_batch_index += 1; account.queue.next_full_batch_index %= account.queue.num_batches; } + if QUEUE_TYPE == QueueType::Address as u64 { + self.get_account_mut().next_index += circuit_batch_size; + } + + self.wipe_previous_batch_bloom_filter()?; + Ok(BatchNullifyEvent { id, batch_index, @@ -451,6 +589,9 @@ impl ZeroCopyBatchedMerkleTreeAccount { } else if QUEUE_TYPE == QueueType::Input as u64 { verify_batch_update(batch_size, public_input_hash, &proof) .map_err(ProgramError::from)?; + } else if QUEUE_TYPE == QueueType::Address as u64 { + verify_batch_address_update(batch_size, public_input_hash, &proof) + .map_err(ProgramError::from)?; } else { return err!(AccountCompressionErrorCode::InvalidQueueType); } @@ -469,12 +610,22 @@ impl ZeroCopyBatchedMerkleTreeAccount { leaf_index: u64, 
tx_hash: &[u8; 32], ) -> Result<()> { + if self.get_account().tree_type != TreeType::BatchedState as u64 { + return err!(AccountCompressionErrorCode::InvalidTreeType); + } let leaf_index_bytes = leaf_index.to_be_bytes(); let nullifier = Poseidon::hashv(&[compressed_account_hash, &leaf_index_bytes, tx_hash]) .map_err(ProgramError::from)?; self.insert_into_current_batch(compressed_account_hash, &nullifier) } + pub fn insert_address_into_current_batch(&mut self, address: &[u8; 32]) -> Result<()> { + if self.get_account().tree_type != TreeType::BatchedAddress as u64 { + return err!(AccountCompressionErrorCode::InvalidTreeType); + } + self.insert_into_current_batch(address, address) + } + fn insert_into_current_batch( &mut self, bloom_filter_value: &[u8; 32], @@ -535,25 +686,77 @@ impl ZeroCopyBatchedMerkleTreeAccount { // If the sequence number is greater than current sequence number // there is still at least one root which can be used to prove // inclusion of a value which was in the batch that was just wiped. 
- if sequence_number > self.get_account().sequence_number { - // advance root history array current index from latest root - // to root_index and overwrite all roots with zeros - if let Some(root_index) = root_index { - let root_index = root_index as usize; - let start = self.root_history.last_index(); - let end = self.root_history.len() + root_index; - for index in start + 1..end { - let index = index % self.root_history.len(); - if index == root_index { - break; - } - let root = self.root_history.get_mut(index).unwrap(); - *root = [0u8; 32]; - } + self.zero_out_roots(sequence_number, root_index); + } + } + Ok(()) + } + + fn zero_out_roots(&mut self, sequence_number: u64, root_index: Option) { + if sequence_number > self.get_account().sequence_number { + println!("zeroing out roots"); + // advance root history array current index from latest root + // to root_index and overwrite all roots with zeros + if let Some(root_index) = root_index { + let root_index = root_index as usize; + let start = self.root_history.last_index(); + let end = self.root_history.len() + root_index; + for index in start + 1..end { + let index = index % self.root_history.len(); + if index == root_index { + break; } + let root = self.root_history.get_mut(index).unwrap(); + *root = [0u8; 32]; } } } + } + + /// Wipe bloom filter after a batch has been inserted and 50% of the + /// subsequent batch been processed. + /// 1. Previous batch must be inserted and bloom filter must not be wiped. + /// 2. Current batch must be 50% full + /// 3. 
if yes + /// 3.1 zero out bloom filter + /// 3.2 mark bloom filter as wiped + /// 3.3 zero out roots if needed + pub fn wipe_previous_batch_bloom_filter(&mut self) -> Result<()> { + let current_batch = self.get_account().queue.currently_processing_batch_index; + let batch_size = self.get_account().queue.batch_size; + let previous_full_batch_index = self + .get_account() + .queue + .next_full_batch_index + .saturating_sub(1) as usize; + let num_inserted_elements = self + .batches + .get(current_batch as usize) + .unwrap() + .get_num_inserted_elements(); + let previous_full_batch = self.batches.get_mut(previous_full_batch_index).unwrap(); + println!( + "wipe_previous_batch_bloom_filter: current_batch: {}, previous_full_batch_index: {}, num_inserted_elements: {}", + current_batch, previous_full_batch_index, num_inserted_elements + ); + if previous_full_batch.get_state() == BatchState::Inserted + && batch_size / 2 > num_inserted_elements + && !previous_full_batch.bloom_filter_is_wiped + { + println!("wiping bloom filter index {}", previous_full_batch_index); + let bloom_filter = self + .bloom_filter_stores + .get_mut(previous_full_batch_index) + .unwrap(); + bloom_filter.as_mut_slice().iter_mut().for_each(|x| *x = 0); + previous_full_batch.bloom_filter_is_wiped = true; + let seq = previous_full_batch.sequence_number; + let root_index = previous_full_batch.root_index; + self.zero_out_roots(seq, Some(root_index)); + } else { + println!("not wiping bloom filter"); + } + Ok(()) } @@ -581,6 +784,14 @@ pub fn create_hash_chain_from_vec(inputs: Vec<[u8; 32]>) -> Result<[u8; 32]> { Ok(hash_chain) } +pub fn create_hash_chain_from_slice(inputs: &[[u8; 32]]) -> Result<[u8; 32]> { + let mut hash_chain = inputs[0]; + for input in inputs.iter().skip(1) { + hash_chain = Poseidon::hashv(&[&hash_chain, input]).map_err(ProgramError::from)?; + } + Ok(hash_chain) +} + pub fn get_merkle_tree_account_size_default() -> usize { let mt_account = BatchedMerkleTreeAccount { metadata: 
MerkleTreeMetadata::default(), @@ -602,7 +813,7 @@ pub fn get_merkle_tree_account_size_default() -> usize { mt_account.size().unwrap() } -pub fn get_merkle_tree_account_size_from_params( +pub fn get_state_merkle_tree_account_size_from_params( params: InitStateTreeAccountsInstructionData, ) -> usize { get_merkle_tree_account_size( @@ -614,6 +825,18 @@ pub fn get_merkle_tree_account_size_from_params( params.input_queue_num_batches, ) } +pub fn get_address_merkle_tree_account_size_from_params( + params: InitAddressTreeAccountsInstructionData, +) -> usize { + get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ) +} pub fn get_merkle_tree_account_size( batch_size: u64, @@ -706,8 +929,13 @@ mod tests { use light_merkle_tree_reference::MerkleTree; use light_prover_client::{ gnark::helpers::{spawn_prover, ProofType, ProverConfig}, - mock_batched_forester::{self, MockBatchedForester, MockTxEvent}, + helpers::bigint_to_u8_32, + mock_batched_forester::{ + self, MockBatchedAddressForester, MockBatchedForester, MockTxEvent, + }, }; + use num_bigint::BigInt; + use num_traits::zero; use serial_test::serial; use std::{cmp::min, ops::Deref}; @@ -719,10 +947,45 @@ mod tests { get_output_queue_account_size_default, get_output_queue_account_size_from_params, BatchedQueueAccount, }, - init_batched_state_merkle_tree_accounts, + init_batched_address_merkle_tree_account, init_batched_state_merkle_tree_accounts, }; use super::*; + + pub fn assert_nullifier_queue_insert( + mut pre_account: BatchedMerkleTreeAccount, + mut pre_batches: ManuallyDrop>, + pre_value_vecs: &mut Vec>>, + pre_roots: Vec<[u8; 32]>, + mut pre_hashchains: Vec>>, + mut merkle_tree_zero_copy_account: ZeroCopyBatchedMerkleTreeAccount, + bloom_filter_insert_values: Vec<[u8; 32]>, + leaf_indices: Vec, + tx_hash: [u8; 32], + input_is_in_tree: Vec, + 
array_indices: Vec, + ) -> Result<()> { + let mut leaf_hashchain_insert_values = vec![]; + for (insert_value, leaf_index) in bloom_filter_insert_values.iter().zip(leaf_indices.iter()) + { + let nullifier = + Poseidon::hashv(&[insert_value.as_slice(), &leaf_index.to_be_bytes(), &tx_hash]) + .unwrap(); + leaf_hashchain_insert_values.push(nullifier); + } + assert_input_queue_insert( + pre_account, + pre_batches, + pre_value_vecs, + pre_roots, + pre_hashchains, + merkle_tree_zero_copy_account, + bloom_filter_insert_values, + leaf_hashchain_insert_values, + input_is_in_tree, + array_indices, + ) + } /// Insert into input queue: /// 1. New value exists in the current batch bloom_filter /// 2. New value does not exist in the other batch bloom_filters @@ -734,13 +997,13 @@ mod tests { pre_roots: Vec<[u8; 32]>, mut pre_hashchains: Vec>>, mut merkle_tree_zero_copy_account: ZeroCopyBatchedMerkleTreeAccount, - insert_values: Vec<[u8; 32]>, - leaf_indices: Vec, - tx_hash: [u8; 32], + bloom_filter_insert_values: Vec<[u8; 32]>, + leaf_hashchain_insert_values: Vec<[u8; 32]>, input_is_in_tree: Vec, array_indices: Vec, ) -> Result<()> { - for (i, insert_value) in insert_values.iter().enumerate() { + let mut should_be_wiped = false; + for (i, insert_value) in bloom_filter_insert_values.iter().enumerate() { if !input_is_in_tree[i] { let value_vec_index = array_indices[i]; assert!( @@ -762,8 +1025,6 @@ mod tests { ); } - let leaf_index = leaf_indices[i]; - let post_roots: Vec<[u8; 32]> = merkle_tree_zero_copy_account .root_history .iter() @@ -786,13 +1047,38 @@ mod tests { .currently_processing_batch_index as usize; let inserted_batch_index = pre_account.queue.currently_processing_batch_index as usize; let expected_batch = pre_batches.get_mut(inserted_batch_index).unwrap(); + println!( + "assert input queue batch update: expected_batch: {:?}", + expected_batch + ); + println!( + "assert input queue batch update: expected_batch.get_num_inserted_elements(): {}", + 
expected_batch.get_num_inserted_elements() + ); + println!( + "assert input queue batch update: expected_batch.batch_size / 2: {}", + expected_batch.batch_size / 2 + ); + if !should_be_wiped && expected_batch.get_state() == BatchState::Inserted { + should_be_wiped = + expected_batch.get_num_inserted_elements() == expected_batch.batch_size / 2; + } + println!( + "assert input queue batch update: should_be_wiped: {}", + should_be_wiped + ); if expected_batch.get_state() == BatchState::Inserted { + println!("assert input queue batch update: clearing batch"); pre_hashchains[inserted_batch_index].clear(); expected_batch.sequence_number = 0; expected_batch.advance_state_to_can_be_filled().unwrap(); + expected_batch.bloom_filter_is_wiped = false; } - + println!( + "assert input queue batch update: inserted_batch_index: {}", + inserted_batch_index + ); // New value exists in the current batch bloom filter let mut bloom_filter = light_bloom_filter::BloomFilter::new( merkle_tree_zero_copy_account.batches[inserted_batch_index].num_iters as usize, @@ -801,12 +1087,15 @@ mod tests { .as_mut_slice(), ) .unwrap(); + println!( + "assert input queue batch update: insert_value: {:?}", + insert_value + ); assert!(bloom_filter.contains(&insert_value)); let mut pre_hashchain = pre_hashchains.get_mut(inserted_batch_index).unwrap(); - let nullifier = - Poseidon::hashv(&[insert_value.as_slice(), &leaf_index.to_be_bytes(), &tx_hash]) - .unwrap(); - expected_batch.add_to_hash_chain(&nullifier, &mut pre_hashchain)?; + + expected_batch + .add_to_hash_chain(&leaf_hashchain_insert_values[i], &mut pre_hashchain)?; // New value does not exist in the other batch bloom_filters for (i, batch) in merkle_tree_zero_copy_account.batches.iter_mut().enumerate() { @@ -830,7 +1119,7 @@ mod tests { merkle_tree_zero_copy_account.batches [pre_account.queue.currently_processing_batch_index as usize] .get_state(), - BatchState::ReadyToUpdateTree + BatchState::Full ); 
pre_account.queue.currently_processing_batch_index += 1; pre_account.queue.currently_processing_batch_index %= pre_account.queue.num_batches; @@ -855,10 +1144,18 @@ mod tests { ); let inserted_batch_index = pre_account.queue.currently_processing_batch_index as usize; let mut expected_batch = pre_batches[inserted_batch_index].clone(); + if should_be_wiped { + expected_batch.bloom_filter_is_wiped = true; + } assert_eq!( merkle_tree_zero_copy_account.batches[inserted_batch_index], expected_batch ); + let other_batch = if inserted_batch_index == 0 { 1 } else { 0 }; + assert_eq!( + merkle_tree_zero_copy_account.batches[other_batch], + pre_batches[other_batch] + ); assert_eq!( merkle_tree_zero_copy_account.hashchain_store, *pre_hashchains, "Hashchain store inconsistent." @@ -921,7 +1218,7 @@ mod tests { output_zero_copy_account.batches [pre_account.queue.currently_processing_batch_index as usize] .get_state() - == BatchState::ReadyToUpdateTree + == BatchState::Full ); pre_account.queue.currently_processing_batch_index += 1; pre_account.queue.currently_processing_batch_index %= pre_account.queue.num_batches; @@ -963,7 +1260,8 @@ mod tests { let mut output_zero_copy_account = ZeroCopyBatchedQueueAccount::from_bytes_mut(output_queue_account_data).unwrap(); let mut merkle_tree_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(merkle_tree_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut(merkle_tree_account_data) + .unwrap(); let flattened_inputs = instruction_data .inputs .iter() @@ -1161,7 +1459,10 @@ mod tests { }; let merkle_tree_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); println!( "input queue: {:?}", merkle_tree_zero_copy_account.batches[0].get_num_inserted() @@ -1197,9 +1498,12 @@ mod tests { if !inputs.is_empty() { let merkle_tree_zero_copy_account = 
- ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_data) - .unwrap(); - assert_input_queue_insert( + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut pre_mt_data, + ) + .unwrap(); + println!("inputs: {:?}", inputs); + assert_nullifier_queue_insert( pre_mt_account, pre_batches, &mut pre_output_value_stores, @@ -1240,15 +1544,18 @@ mod tests { num_output_values += number_of_outputs; num_input_values += number_of_inputs; let merkle_tree_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut pre_mt_data, + ) + .unwrap(); in_ready_for_update = merkle_tree_zero_copy_account .batches .iter() - .any(|batch| batch.get_state() == BatchState::ReadyToUpdateTree); + .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); out_ready_for_update = output_zero_copy_account .batches .iter() - .any(|batch| batch.get_state() == BatchState::ReadyToUpdateTree); + .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); mt_account_data = pre_mt_data.clone(); } else { @@ -1264,11 +1571,16 @@ mod tests { println!("Num output values: {}", num_output_values); let mut pre_mt_account_data = mt_account_data.clone(); let old_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); let (input_res, new_root) = { let mut zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_account_data) - .unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut pre_mt_account_data, + ) + .unwrap(); println!("batches {:?}", zero_copy_account.batches); let old_root_index = zero_copy_account.root_history.last_index(); @@ -1335,8 +1647,10 @@ mod tests { // One root changed one didn't let zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut 
pre_mt_account_data) - .unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut pre_mt_account_data, + ) + .unwrap(); assert_nullify_event(nullify_event, new_root, &old_zero_copy_account, mt_pubkey); assert_merkle_tree_update( old_zero_copy_account, @@ -1359,8 +1673,10 @@ mod tests { let mut pre_mt_account_data = mt_account_data.clone(); let mut zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_account_data) - .unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut pre_mt_account_data, + ) + .unwrap(); let output_zero_copy_account = ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data) .unwrap(); @@ -1423,7 +1739,10 @@ mod tests { .unwrap(); let old_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); println!("batch 0: {:?}", output_zero_copy_account.batches[0]); println!("batch 1: {:?}", output_zero_copy_account.batches[1]); @@ -1565,13 +1884,16 @@ mod tests { out_ready_for_update = output_zero_copy_account .batches .iter() - .any(|batch| batch.get_state() == BatchState::ReadyToUpdateTree); + .any(|batch| batch.get_state() == BatchState::Full); } // Input queue { let mut merkle_tree_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); if rng.gen_bool(0.5) && !mock_indexer.active_leaves.is_empty() { println!("Input insert -----------------------------"); @@ -1605,9 +1927,11 @@ mod tests { { let merkle_tree_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data) - .unwrap(); - assert_input_queue_insert( + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + 
assert_nullifier_queue_insert( pre_account, pre_batches, &mut vec![], @@ -1628,7 +1952,7 @@ mod tests { in_ready_for_update = merkle_tree_zero_copy_account .batches .iter() - .any(|batch| batch.get_state() == BatchState::ReadyToUpdateTree); + .any(|batch| batch.get_state() == BatchState::Full); } if in_ready_for_update { @@ -1654,8 +1978,10 @@ mod tests { println!("Num output values: {}", num_output_values); let mut pre_mt_account_data = mt_account_data.clone(); let mut zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_account_data) - .unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut pre_mt_account_data, + ) + .unwrap(); let output_zero_copy_account = ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data) .unwrap(); @@ -1741,7 +2067,10 @@ mod tests { .unwrap(); let old_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); println!("batch 0: {:?}", output_zero_copy_account.batches[0]); println!("batch 1: {:?}", output_zero_copy_account.batches[1]); @@ -1776,12 +2105,14 @@ mod tests { mt_pubkey: Pubkey, ) { let mut cloned_mt_account_data = (*mt_account_data).to_vec(); - let old_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(cloned_mt_account_data.as_mut_slice()) - .unwrap(); + let old_zero_copy_account = ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + cloned_mt_account_data.as_mut_slice(), + ) + .unwrap(); let (input_res, root) = { let mut zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(mt_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut(mt_account_data) + .unwrap(); let old_root_index = zero_copy_account.root_history.last_index(); let next_full_batch = zero_copy_account.get_account().queue.next_full_batch_index; @@ -1831,48 +2162,141 
@@ mod tests { // One root changed one didn't let zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(mt_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut(mt_account_data).unwrap(); if enable_assert { assert_merkle_tree_update(old_zero_copy_account, zero_copy_account, None, None, root); } } - fn assert_merkle_tree_update( - old_zero_copy_account: ZeroCopyBatchedMerkleTreeAccount, - zero_copy_account: ZeroCopyBatchedMerkleTreeAccount, - old_queue_account: Option, - queue_account: Option, - root: [u8; 32], + pub async fn perform_address_update( + mt_account_data: &mut [u8], + mock_indexer: &mut MockBatchedAddressForester<26>, + enable_assert: bool, + mt_pubkey: Pubkey, ) { - let mut expected_account = old_zero_copy_account.get_account().clone(); - expected_account.sequence_number += 1; - let actual_account = zero_copy_account.get_account().clone(); - - let ( - batches, - previous_batchs, - previous_processing, - expected_queue_account, - mut next_full_batch_index, - ) = if let Some(queue_account) = queue_account.as_ref() { - let expected_queue_account = old_queue_account.as_ref().unwrap().get_account().clone(); + println!("pre address update -----------------------------"); + let mut cloned_mt_account_data = (*mt_account_data).to_vec(); + let old_zero_copy_account = ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + cloned_mt_account_data.as_mut_slice(), + ) + .unwrap(); + let (input_res, root, pre_next_full_batch) = { + let mut zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut(mt_account_data) + .unwrap(); + + let old_root_index = zero_copy_account.root_history.last_index(); + let next_full_batch = zero_copy_account.get_account().queue.next_full_batch_index; + let next_index = zero_copy_account.get_account().next_index; + println!("next index {:?}", next_index); + let batch = zero_copy_account + .batches + .get(next_full_batch as usize) + .unwrap(); + let 
batch_start_index = batch.start_index; + let leaves_hashchain = zero_copy_account + .hashchain_store + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let current_root = zero_copy_account.root_history.last().unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_address_proof( + zero_copy_account.get_account().queue.batch_size as u32, + zero_copy_account.get_account().queue.zkp_batch_size as u32, + *leaves_hashchain, + next_index as usize, + batch_start_index as usize, + *current_root, + ) + .await + .unwrap(); + let instruction_data = InstructionDataBatchNullifyInputs { + public_inputs: BatchProofInputsIx { + new_root, + old_root_index: old_root_index as u16, + }, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; - let previous_processing = if queue_account - .get_account() - .queue - .currently_processing_batch_index - == 0 - { - queue_account.get_account().queue.num_batches - 1 - } else { - queue_account - .get_account() - .queue - .currently_processing_batch_index - - 1 - }; - expected_account.next_index += queue_account.batches.get(0).unwrap().zkp_batch_size; - let next_full_batch_index = expected_queue_account.queue.next_full_batch_index; + ( + zero_copy_account.update_address_queue(instruction_data, mt_pubkey.to_bytes()), + new_root, + next_full_batch, + ) + }; + println!("post address update -----------------------------"); + println!("res {:?}", input_res); + assert!(input_res.is_ok()); + let event = input_res.unwrap(); + + // assert Merkle tree + // sequence number increased X + // next index increased X + // current root index increased X + // One root changed one didn't + + let zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut(mt_account_data).unwrap(); + + { + let next_full_batch = zero_copy_account.get_account().queue.next_full_batch_index; + let batch = zero_copy_account + .batches + .get(next_full_batch as 
usize) + .unwrap(); + // println!("batch {:?}", batch); + // println!("account state {:?}", batch.get_state()); + if pre_next_full_batch != next_full_batch { + mock_indexer.finalize_batch_address_update(batch.batch_size as usize); + } + } + if enable_assert { + assert_merkle_tree_update(old_zero_copy_account, zero_copy_account, None, None, root); + } + } + + fn assert_merkle_tree_update( + old_zero_copy_account: ZeroCopyBatchedMerkleTreeAccount, + zero_copy_account: ZeroCopyBatchedMerkleTreeAccount, + old_queue_account: Option, + queue_account: Option, + root: [u8; 32], + ) { + let mut expected_account = old_zero_copy_account.get_account().clone(); + expected_account.sequence_number += 1; + let actual_account = zero_copy_account.get_account().clone(); + + let ( + batches, + previous_batchs, + previous_processing, + expected_queue_account, + mut next_full_batch_index, + ) = if let Some(queue_account) = queue_account.as_ref() { + let expected_queue_account = old_queue_account.as_ref().unwrap().get_account().clone(); + + let previous_processing = if queue_account + .get_account() + .queue + .currently_processing_batch_index + == 0 + { + queue_account.get_account().queue.num_batches - 1 + } else { + queue_account + .get_account() + .queue + .currently_processing_batch_index + - 1 + }; + expected_account.next_index += queue_account.batches.get(0).unwrap().zkp_batch_size; + let next_full_batch_index = expected_queue_account.queue.next_full_batch_index; ( queue_account.batches.clone(), old_queue_account.as_ref().unwrap().batches.clone(), @@ -1881,11 +2305,12 @@ mod tests { next_full_batch_index, ) } else { + // We only have two batches. 
let previous_processing = if expected_account.queue.currently_processing_batch_index == 0 { - expected_account.queue.num_batches - 1 + 1 } else { - expected_account.queue.currently_processing_batch_index - 1 + 0 }; ( zero_copy_account.batches.clone(), @@ -1899,10 +2324,15 @@ mod tests { let mut checked_one = false; for (i, batch) in batches.iter().enumerate() { let previous_batch = previous_batchs.get(i).unwrap(); - if batch.sequence_number != 0 - && batch.get_state() == BatchState::Inserted - && previous_processing == i as u64 - { + + let expected_sequence_number = zero_copy_account.root_history.capacity() as u64 + + zero_copy_account.get_account().sequence_number; + let batch_fully_inserted = batch.sequence_number == expected_sequence_number + && batch.get_state() == BatchState::Inserted; + + let updated_batch = previous_batch.get_first_ready_zkp_batch().is_ok() && !checked_one; + // Assert fully inserted batch + if batch_fully_inserted { if queue_account.is_some() { next_full_batch_index += 1; next_full_batch_index %= expected_queue_account.unwrap().queue.num_batches; @@ -1911,7 +2341,6 @@ mod tests { expected_account.queue.next_full_batch_index %= expected_account.queue.num_batches; } - assert_eq!( batch.root_index as usize, zero_copy_account.root_history.last_index() @@ -1922,7 +2351,9 @@ mod tests { assert_ne!(batch.sequence_number, previous_batch.sequence_number); assert_eq!(batch.get_current_zkp_batch_index(), 0); assert_ne!(batch.get_state(), previous_batch.get_state()); - } else if batch.get_state() == BatchState::ReadyToUpdateTree && !checked_one { + } + // assert updated batch + else if updated_batch { checked_one = true; assert_eq!( batch.get_num_inserted_zkps(), @@ -1934,10 +2365,10 @@ mod tests { assert_eq!(batch.root_index, previous_batch.root_index); assert_eq!( batch.get_current_zkp_batch_index(), - batch.get_num_zkp_batches() + previous_batch.get_current_zkp_batch_index() ); assert_eq!(batch.get_state(), previous_batch.get_state()); - 
assert_eq!(batch.get_num_inserted(), 0); + assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted()); } else { assert_eq!(*batch, *previous_batch); } @@ -1996,7 +2427,7 @@ mod tests { let mut output_queue_account_data = vec![0; queue_account_size]; let output_queue_pubkey = Pubkey::new_unique(); - let mt_account_size = get_merkle_tree_account_size_from_params(params); + let mt_account_size = get_state_merkle_tree_account_size_from_params(params); let mut mt_account_data = vec![0; mt_account_size]; let mt_pubkey = Pubkey::new_unique(); @@ -2067,7 +2498,7 @@ mod tests { output_zero_copy_account .batches .iter() - .for_each(|b| assert_eq!(b.get_state(), BatchState::ReadyToUpdateTree)); + .for_each(|b| assert_eq!(b.get_state(), BatchState::Full)); for i in 0..output_zero_copy_account .get_account() @@ -2081,8 +2512,10 @@ mod tests { println!("Num output values: {}", num_output_values); let mut pre_mt_account_data = mt_account_data.clone(); let mut zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_account_data) - .unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut pre_mt_account_data, + ) + .unwrap(); let output_zero_copy_account = ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data) .unwrap(); @@ -2152,7 +2585,10 @@ mod tests { .unwrap(); let old_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); output_queue_account_data = pre_output_queue_state; mt_account_data = pre_mt_account_data; @@ -2164,7 +2600,10 @@ mod tests { let mut first_value = [0u8; 32]; for tx in 0..num_tx { let mut merkle_tree_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); println!("Input insert 
-----------------------------"); let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); @@ -2187,6 +2626,8 @@ mod tests { outputs: vec![], tx_hash, }); + println!("leaf {:?}", leaf); + println!("leaf_index {:?}", leaf_index); merkle_tree_zero_copy_account .insert_nullifier_into_current_batch( &leaf.to_vec().try_into().unwrap(), @@ -2194,7 +2635,7 @@ mod tests { &tx_hash, ) .unwrap(); - assert_input_queue_insert( + assert_nullifier_queue_insert( pre_account, pre_batches, &mut vec![], @@ -2215,8 +2656,10 @@ mod tests { // subsequent tests let mut mt_account_data = mt_account_data.clone(); let mut merkle_tree_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data) - .unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); let result = merkle_tree_zero_copy_account.insert_nullifier_into_current_batch( &leaf.to_vec().try_into().unwrap(), leaf_index as u64, @@ -2234,8 +2677,10 @@ mod tests { } else { let mut mt_account_data = mt_account_data.clone(); let mut merkle_tree_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data) - .unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); let result = merkle_tree_zero_copy_account.insert_nullifier_into_current_batch( &first_value.to_vec().try_into().unwrap(), leaf_index as u64, @@ -2252,8 +2697,10 @@ mod tests { // Assert input queue is full and doesn't accept more inserts { let merkle_tree_zero_copy_account = - &mut ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data) - .unwrap(); + &mut ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); let rnd_bytes = get_rnd_bytes(&mut rng); let tx_hash = get_rnd_bytes(&mut rng); let result = merkle_tree_zero_copy_account @@ -2271,6 +2718,15 @@ mod tests { println!("input update ----------------------------- {}", 
i); perform_input_update(&mut mt_account_data, &mut mock_indexer, false, mt_pubkey) .await; + if i == 5 { + let merkle_tree_zero_copy_account = + &mut ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + let batch = merkle_tree_zero_copy_account.batches.get(0).unwrap(); + assert!(batch.bloom_filter_is_wiped); + } println!( "performed input queue batched update {} created root {:?}", i, @@ -2280,7 +2736,10 @@ mod tests { first_input_batch_update_root_value = mock_indexer.merkle_tree.root(); } let mut merkle_tree_zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); println!( "root {:?}", merkle_tree_zero_copy_account.root_history.last().unwrap() @@ -2293,18 +2752,27 @@ mod tests { // assert all bloom_filters are inserted { let merkle_tree_zero_copy_account = - &mut ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data) - .unwrap(); - for batch in merkle_tree_zero_copy_account.batches.iter() { + &mut ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + for (i, batch) in merkle_tree_zero_copy_account.batches.iter().enumerate() { println!("batch {:?}", batch); assert_eq!(batch.get_state(), BatchState::Inserted); + if i == 0 { + assert!(batch.bloom_filter_is_wiped); + } else { + assert!(!batch.bloom_filter_is_wiped); + } } } // do one insert and expect that roots until merkle_tree_zero_copy_account.batches[0].root_index are zero { let merkle_tree_zero_copy_account = - &mut ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data) - .unwrap(); + &mut ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); let pre_batch_zero = merkle_tree_zero_copy_account .batches .get(0) @@ -2378,4 +2846,270 @@ mod tests { } } // TODO: add test that we cannot 
insert a batch that is not ready + + #[serial] + #[tokio::test] + async fn test_fill_address_tree_completely() { + spawn_prover( + true, + ProverConfig { + run_mode: None, + circuits: vec![ProofType::BatchAddressAppendTest], + }, + ) + .await; + let roothistory_capacity = vec![17, 80]; // + for root_history_capacity in roothistory_capacity { + let mut mock_indexer = + mock_batched_forester::MockBatchedAddressForester::<26>::default(); + + let mut params = crate::InitAddressTreeAccountsInstructionData::test_default(); + // Root history capacity which is greater than the input updates + params.root_history_capacity = root_history_capacity; + + let owner = Pubkey::new_unique(); + + let mt_account_size = get_address_merkle_tree_account_size_from_params(params); + let mut mt_account_data = vec![0; mt_account_size]; + let mt_pubkey = Pubkey::new_unique(); + + let merkle_tree_rent = 1_000_000_000; + + init_batched_address_merkle_tree_account( + owner, + params, + &mut mt_account_data, + merkle_tree_rent, + ) + .unwrap(); + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + let mut in_ready_for_update = false; + let mut out_ready_for_update = false; + let mut num_output_updates = 0; + let mut num_input_updates = 0; + let mut num_input_values = 0; + let mut num_output_values = 0; + + let num_tx = params.input_queue_num_batches * params.input_queue_batch_size; + let mut first_value = [0u8; 32]; + for tx in 0..num_tx { + let mut merkle_tree_zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + + println!("Input insert -----------------------------"); + let mut rnd_address = get_rnd_bytes(&mut rng); + rnd_address[0] = 0; + + let pre_batches: ManuallyDrop> = + merkle_tree_zero_copy_account.batches.clone(); + let pre_account = merkle_tree_zero_copy_account.get_account().clone(); + let pre_roots = merkle_tree_zero_copy_account + .root_history + .iter() + .cloned() + .collect(); + let 
pre_hashchains = merkle_tree_zero_copy_account.hashchain_store.clone(); + + merkle_tree_zero_copy_account + .insert_address_into_current_batch(&rnd_address) + .unwrap(); + assert_input_queue_insert( + pre_account, + pre_batches, + &mut vec![], + pre_roots, + pre_hashchains, + merkle_tree_zero_copy_account, + vec![rnd_address], + vec![rnd_address], + vec![true], + vec![], + ) + .unwrap(); + mock_indexer.queue_leaves.push(rnd_address); + + // Insert the same value twice + { + // copy data so that failing test doesn't affect the state of + // subsequent tests + let mut mt_account_data = mt_account_data.clone(); + let mut merkle_tree_zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + let result = merkle_tree_zero_copy_account + .insert_address_into_current_batch(&rnd_address); + result.unwrap_err(); + // assert_eq!( + // result.unwrap_err(), + // AccountCompressionErrorCode::BatchInsertFailed.into() + // ); + } + // Try to insert first value into any batch + if tx == 0 { + first_value = rnd_address; + } else { + let mut mt_account_data = mt_account_data.clone(); + let mut merkle_tree_zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + + let result = merkle_tree_zero_copy_account.insert_address_into_current_batch( + &first_value.to_vec().try_into().unwrap(), + ); + // assert_eq!( + // result.unwrap_err(), + // AccountCompressionErrorCode::BatchInsertFailed.into() + // ); + result.unwrap_err(); + // assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); + } + } + // Assert input queue is full and doesn't accept more inserts + { + let merkle_tree_zero_copy_account = + &mut ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + let rnd_bytes = get_rnd_bytes(&mut rng); + let result = + merkle_tree_zero_copy_account.insert_address_into_current_batch(&rnd_bytes); + 
assert_eq!( + result.unwrap_err(), + AccountCompressionErrorCode::BatchNotReady.into() + ); + } + // Root of the final batch of first input queue batch + let mut first_input_batch_update_root_value = [0u8; 32]; + let num_updates = params.input_queue_batch_size / params.input_queue_zkp_batch_size + * params.input_queue_num_batches; + for i in 0..num_updates { + println!("address update ----------------------------- {}", i); + perform_address_update(&mut mt_account_data, &mut mock_indexer, false, mt_pubkey) + .await; + if i == 4 { + first_input_batch_update_root_value = mock_indexer.merkle_tree.root(); + } + let mut merkle_tree_zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + let batch = merkle_tree_zero_copy_account.batches.get(0).unwrap(); + let batch_one = merkle_tree_zero_copy_account.batches.get(1).unwrap(); + assert!(!batch_one.bloom_filter_is_wiped); + + if i >= 4 { + assert!(batch.bloom_filter_is_wiped); + } else { + assert!(!batch.bloom_filter_is_wiped); + } + } + // assert all bloom_filters are inserted + { + let merkle_tree_zero_copy_account = + &mut ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + for (i, batch) in merkle_tree_zero_copy_account.batches.iter().enumerate() { + assert_eq!(batch.get_state(), BatchState::Inserted); + if i == 0 { + assert!(batch.bloom_filter_is_wiped); + } else { + assert!(!batch.bloom_filter_is_wiped); + } + } + } + // do one insert and expect that roots until merkle_tree_zero_copy_account.batches[0].root_index are zero + { + let merkle_tree_zero_copy_account = + &mut ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut mt_account_data, + ) + .unwrap(); + println!( + "root history {:?}", + merkle_tree_zero_copy_account.root_history + ); + let pre_batch_zero = merkle_tree_zero_copy_account + .batches + .get(0) + .unwrap() + .clone(); + + // let mut address = get_rnd_bytes(&mut 
rng); + // address[0] = 0; + // merkle_tree_zero_copy_account.insert_address_into_current_batch(&address); + // { + // let post_batch = merkle_tree_zero_copy_account + // .batches + // .get(0) + // .unwrap() + // .clone(); + // assert_eq!(post_batch.get_state(), BatchState::CanBeFilled); + // assert_eq!(post_batch.get_num_inserted(), 1); + // let mut bloom_filter_store = merkle_tree_zero_copy_account + // .bloom_filter_stores + // .get_mut(0) + // .unwrap(); + // let mut bloom_filter = BloomFilter::new( + // params.bloom_filter_num_iters as usize, + // params.bloom_filter_capacity, + // bloom_filter_store.as_mut_slice(), + // ) + // .unwrap(); + // assert!(bloom_filter.contains(&address)); + // } + + let root_history_len = merkle_tree_zero_copy_account + .get_account() + .root_history_capacity; + for root in merkle_tree_zero_copy_account.root_history.iter() { + println!("root {:?}", root); + } + println!( + "root in root index {:?}", + merkle_tree_zero_copy_account.root_history[pre_batch_zero.root_index as usize] + ); + // check that all roots have been overwritten except the root index + // of the update + let root_history_len: u32 = merkle_tree_zero_copy_account.root_history.len() as u32; + let start = merkle_tree_zero_copy_account.root_history.last_index() as u32; + println!("start {:?}", start); + for root in start + 1..pre_batch_zero.root_index + root_history_len { + println!("actual index {:?}", root); + let index = root % root_history_len; + + if index == pre_batch_zero.root_index { + let root_index = pre_batch_zero.root_index as usize; + + assert_eq!( + merkle_tree_zero_copy_account.root_history[root_index], + first_input_batch_update_root_value + ); + assert_eq!( + merkle_tree_zero_copy_account.root_history[root_index - 1], + [0u8; 32] + ); + break; + } + println!("index {:?}", index); + assert_eq!( + merkle_tree_zero_copy_account.root_history[index as usize], + [0u8; 32] + ); + } + } + } + } + // TODO: add test that we cannot insert a batch that is not 
ready } diff --git a/programs/account-compression/src/state/batched_queue.rs b/programs/account-compression/src/state/batched_queue.rs index f0d2ccabbb..438a14f8c8 100644 --- a/programs/account-compression/src/state/batched_queue.rs +++ b/programs/account-compression/src/state/batched_queue.rs @@ -243,6 +243,7 @@ impl ZeroCopyBatchedQueueAccount { num_iters, bloom_filter_capacity, &mut 0, + 0, )?; Ok(ZeroCopyBatchedQueueAccount { account, @@ -273,6 +274,34 @@ impl ZeroCopyBatchedQueueAccount { Ok(()) } + pub fn prove_inclusion_by_index(&mut self, leaf_index: u64, value: &[u8; 32]) -> Result { + for (batch_index, batch) in self.batches.iter().enumerate() { + if batch.value_is_inserted_in_batch(leaf_index)? { + let index = batch.get_value_index_in_batch(leaf_index)?; + let element = self.value_vecs[batch_index] + .get_mut(index as usize) + .ok_or(AccountCompressionErrorCode::InclusionProofByIndexFailed)?; + + if element == value { + return Ok(true); + } else { + return err!(AccountCompressionErrorCode::InclusionProofByIndexFailed); + } + } + } + Ok(false) + } + + pub fn could_exist_in_batches(&mut self, leaf_index: u64) -> Result<()> { + for batch in self.batches.iter() { + let res = batch.value_is_inserted_in_batch(leaf_index)?; + if res { + return Ok(()); + } + } + err!(AccountCompressionErrorCode::InclusionProofByIndexFailed) + } + /// Zero out a leaf by index if it exists in the queues value vec. If /// checked fail if leaf is not found. 
pub fn prove_inclusion_by_index_and_zero_out_leaf( @@ -288,14 +317,14 @@ impl ZeroCopyBatchedQueueAccount { .ok_or(AccountCompressionErrorCode::InclusionProofByIndexFailed)?; if element == value { - *element = [0; 32]; + *element = [0u8; 32]; return Ok(()); } else { return err!(AccountCompressionErrorCode::InclusionProofByIndexFailed); } } } - err!(AccountCompressionErrorCode::InclusionProofByIndexFailed) + Ok(()) } pub fn get_batch_num_inserted_in_current_batch(&self) -> u64 { @@ -329,6 +358,7 @@ pub fn insert_into_current_batch( let mut hashchain_store = hashchain_store.get_mut(currently_processing_batch_index); let current_batch = batches.get_mut(currently_processing_batch_index).unwrap(); + println!("current_batch {:?}", current_batch); let mut wipe = false; if current_batch.get_state() == BatchState::Inserted { current_batch.advance_state_to_can_be_filled()?; @@ -337,15 +367,19 @@ pub fn insert_into_current_batch( } wipe = true; } + println!("wipe {:?}", wipe); // We expect to insert into the current batch. 
- if current_batch.get_state() == BatchState::ReadyToUpdateTree { + if current_batch.get_state() == BatchState::Full { for batch in batches.iter_mut() { msg!("batch {:?}", batch); } return err!(AccountCompressionErrorCode::BatchNotReady); } + println!("leaves_hash_value {:?}", leaves_hash_value); + println!("value {:?}", value); if wipe { + msg!("wipe"); if let Some(blomfilter_stores) = bloom_filter_stores.as_mut() { if !current_batch.bloom_filter_is_wiped { (*blomfilter_stores) @@ -356,17 +390,13 @@ pub fn insert_into_current_batch( // When the batch is cleared check that sequence number is greater or equal than self.sequence_number // if not advance current root index to root index if current_batch.sequence_number != 0 { - if root_index.is_none() && sequence_number.is_none() { - root_index = Some(current_batch.root_index); - sequence_number = Some(current_batch.sequence_number); - current_batch.sequence_number = 0; - } else { - unreachable!("root_index is already set this is a bug."); - } + root_index = Some(current_batch.root_index); + sequence_number = Some(current_batch.sequence_number); } } else { current_batch.bloom_filter_is_wiped = false; } + current_batch.sequence_number = 0; } if let Some(value_store) = value_store.as_mut() { (*value_store).clear(); @@ -378,13 +408,7 @@ pub fn insert_into_current_batch( let queue_type = QueueType::from(queue_type); match queue_type { - // QueueType::Address => current_batch.insert_and_store( - // value, - // bloom_filter_stores.unwrap().as_mut_slice(), - // value_store.unwrap(), - // hashchain_store.unwrap(), - // ), - QueueType::Input => current_batch.insert( + QueueType::Input | QueueType::Address => current_batch.insert( value, leaves_hash_value.unwrap(), bloom_filter_stores.unwrap().as_mut_slice(), @@ -411,9 +435,7 @@ pub fn insert_into_current_batch( } } - if batches[account.currently_processing_batch_index as usize].get_state() - == BatchState::ReadyToUpdateTree - { + if 
batches[account.currently_processing_batch_index as usize].get_state() == BatchState::Full { account.currently_processing_batch_index += 1; account.currently_processing_batch_index %= len as u64; } @@ -486,6 +508,7 @@ pub fn init_queue( num_iters: u64, bloom_filter_capacity: u64, start_offset: &mut usize, + batch_start_index: u64, ) -> Result<( ManuallyDrop>, Vec>>, @@ -524,7 +547,7 @@ pub fn init_queue( bloom_filter_capacity, account.batch_size, account.zkp_batch_size, - account.batch_size * i, + account.batch_size * i + batch_start_index, )) .map_err(ProgramError::from)?; } @@ -616,6 +639,7 @@ pub fn assert_queue_inited( batches: &mut ManuallyDrop>, num_batches: usize, num_iters: u64, + start_index: u64, ) { assert_eq!(queue, ref_queue, "queue mismatch"); assert_eq!(batches.len(), num_batches, "batches mismatch"); @@ -625,18 +649,18 @@ pub fn assert_queue_inited( ref_queue.bloom_filter_capacity, ref_queue.batch_size, ref_queue.zkp_batch_size, - ref_queue.batch_size * i as u64, + ref_queue.batch_size * i as u64 + start_index, ); assert_eq!(batch, &ref_batch, "batch mismatch"); } - if queue_type == QueueType::Input as u64 { - assert_eq!(value_vecs.len(), 0, "value_vecs mismatch"); - assert_eq!(value_vecs.capacity(), 0, "value_vecs mismatch"); - } else { + if queue_type == QueueType::Output as u64 { assert_eq!(value_vecs.capacity(), num_batches, "value_vecs mismatch"); assert_eq!(value_vecs.len(), num_batches, "value_vecs mismatch"); + } else { + assert_eq!(value_vecs.len(), 0, "value_vecs mismatch"); + assert_eq!(value_vecs.capacity(), 0, "value_vecs mismatch"); } if queue_type == QueueType::Output as u64 { @@ -686,6 +710,7 @@ pub fn assert_queue_zero_copy_inited( let num_batches = ref_account.queue.num_batches as usize; let queue = zero_copy_account.get_account().queue; let queue_type = zero_copy_account.get_account().metadata.queue_type; + let next_index = zero_copy_account.get_account().next_index; assert_eq!( zero_copy_account.get_account().metadata, 
ref_account.metadata, @@ -700,6 +725,7 @@ pub fn assert_queue_zero_copy_inited( &mut zero_copy_account.batches, num_batches, num_iters, + next_index, ); } @@ -799,6 +825,10 @@ pub mod tests { // 1. Functional for 1 value { zero_copy_account.insert_into_current_batch(&value).unwrap(); + assert_eq!( + zero_copy_account.prove_inclusion_by_index(1, &value), + anchor_lang::err!(AccountCompressionErrorCode::InclusionProofByIndexFailed) + ); assert_eq!( zero_copy_account.prove_inclusion_by_index_and_zero_out_leaf(1, &value), anchor_lang::err!(AccountCompressionErrorCode::InclusionProofByIndexFailed) @@ -807,6 +837,16 @@ pub mod tests { zero_copy_account.prove_inclusion_by_index_and_zero_out_leaf(0, &value2), anchor_lang::err!(AccountCompressionErrorCode::InclusionProofByIndexFailed) ); + assert!(zero_copy_account + .prove_inclusion_by_index(0, &value) + .is_ok()); + // prove inclusion for value out of range returns false + assert_eq!( + zero_copy_account + .prove_inclusion_by_index(100000, &[0u8; 32]) + .unwrap(), + false + ); assert!(zero_copy_account .prove_inclusion_by_index_and_zero_out_leaf(0, &value) .is_ok()); @@ -817,6 +857,10 @@ pub mod tests { zero_copy_account.prove_inclusion_by_index_and_zero_out_leaf(0, &value), anchor_lang::err!(AccountCompressionErrorCode::InclusionProofByIndexFailed) ); + assert_eq!( + zero_copy_account.prove_inclusion_by_index(0, &value), + anchor_lang::err!(AccountCompressionErrorCode::InclusionProofByIndexFailed) + ); } // 3. 
Functional for 2 values diff --git a/programs/account-compression/src/utils/constants.rs b/programs/account-compression/src/utils/constants.rs index d37e757d22..9030fa60f3 100644 --- a/programs/account-compression/src/utils/constants.rs +++ b/programs/account-compression/src/utils/constants.rs @@ -55,3 +55,9 @@ pub const DEFAULT_BATCH_SIZE: u64 = 50000; pub const DEFAULT_ZKP_BATCH_SIZE: u64 = 500; pub const DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE: u64 = 20 * 1024 + 8; + +#[constant] +pub const ADDRESS_TREE_INIT_ROOT_26: [u8; 32] = [ + 33, 133, 56, 184, 142, 166, 110, 161, 4, 140, 169, 247, 115, 33, 15, 181, 76, 89, 48, 126, 58, + 86, 204, 81, 16, 121, 185, 77, 75, 152, 43, 15, +]; diff --git a/programs/registry/src/account_compression_cpi/batch_update_address_tree.rs b/programs/registry/src/account_compression_cpi/batch_update_address_tree.rs new file mode 100644 index 0000000000..6cdc047d80 --- /dev/null +++ b/programs/registry/src/account_compression_cpi/batch_update_address_tree.rs @@ -0,0 +1,49 @@ +use crate::ForesterEpochPda; +use account_compression::{ + batched_merkle_tree::BatchedMerkleTreeAccount, program::AccountCompression, + utils::constants::CPI_AUTHORITY_PDA_SEED, +}; +use anchor_lang::prelude::*; + +#[derive(Accounts)] +pub struct BatchUpdateAddressTree<'info> { + /// CHECK: only eligible foresters can nullify leaves. Is checked in ix. + #[account(mut)] + pub registered_forester_pda: Option>, + pub authority: Signer<'info>, + /// CHECK: (seed constraints) used to invoke account compression program via cpi. + #[account(seeds = [CPI_AUTHORITY_PDA_SEED], bump)] + pub cpi_authority: AccountInfo<'info>, + /// CHECK: (account compression program) group access control. + pub registered_program_pda: AccountInfo<'info>, + pub account_compression_program: Program<'info, AccountCompression>, + /// CHECK: (account compression program) when emitting event. + pub log_wrapper: UncheckedAccount<'info>, + /// CHECK: (account compression program). 
+ #[account(mut)] + pub merkle_tree: AccountLoader<'info, BatchedMerkleTreeAccount>, +} + +pub fn process_batch_update_address_tree( + ctx: &Context, + bump: u8, + data: Vec, +) -> Result<()> { + let bump = &[bump]; + let seeds = [CPI_AUTHORITY_PDA_SEED, bump]; + let signer_seeds = &[&seeds[..]]; + let accounts = account_compression::cpi::accounts::BatchUpdateAddressTree { + authority: ctx.accounts.cpi_authority.to_account_info(), + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), + registered_program_pda: Some(ctx.accounts.registered_program_pda.clone()), + log_wrapper: ctx.accounts.log_wrapper.to_account_info(), + }; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.account_compression_program.to_account_info(), + accounts, + signer_seeds, + ); + + account_compression::cpi::batch_update_address_tree(cpi_ctx, data) +} diff --git a/programs/registry/src/account_compression_cpi/initialize_batched_address_tree.rs b/programs/registry/src/account_compression_cpi/initialize_batched_address_tree.rs new file mode 100644 index 0000000000..9e52d9bd4b --- /dev/null +++ b/programs/registry/src/account_compression_cpi/initialize_batched_address_tree.rs @@ -0,0 +1,43 @@ +use crate::protocol_config::state::ProtocolConfigPda; +use account_compression::InitAddressTreeAccountsInstructionData; +use account_compression::{program::AccountCompression, utils::constants::CPI_AUTHORITY_PDA_SEED}; +use anchor_lang::prelude::*; + +#[derive(Accounts)] +pub struct InitializeBatchedAddressTree<'info> { + #[account(mut)] + pub authority: Signer<'info>, + /// CHECK: initializated in account compression program. + #[account(zero)] + pub merkle_tree: AccountInfo<'info>, + /// CHECK: (account compression program) access control. + pub registered_program_pda: AccountInfo<'info>, + /// CHECK: (seed constraints) used to invoke account compression program via cpi. 
+ #[account(mut, seeds = [CPI_AUTHORITY_PDA_SEED], bump)] + pub cpi_authority: AccountInfo<'info>, + pub account_compression_program: Program<'info, AccountCompression>, + pub protocol_config_pda: Account<'info, ProtocolConfigPda>, +} + +pub fn process_initialize_batched_address_merkle_tree( + ctx: &Context, + bump: u8, + params: InitAddressTreeAccountsInstructionData, +) -> Result<()> { + let bump = &[bump]; + let seeds = [CPI_AUTHORITY_PDA_SEED, bump]; + let signer_seeds = &[&seeds[..]]; + let accounts = account_compression::cpi::accounts::InitializeBatchAddressMerkleTree { + authority: ctx.accounts.cpi_authority.to_account_info(), + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), + registered_program_pda: Some(ctx.accounts.registered_program_pda.clone()), + }; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.account_compression_program.to_account_info(), + accounts, + signer_seeds, + ); + + account_compression::cpi::intialize_batched_address_merkle_tree(cpi_ctx, params) +} diff --git a/programs/registry/src/account_compression_cpi/mod.rs b/programs/registry/src/account_compression_cpi/mod.rs index 758f56a0ca..95642b3b5b 100644 --- a/programs/registry/src/account_compression_cpi/mod.rs +++ b/programs/registry/src/account_compression_cpi/mod.rs @@ -1,9 +1,12 @@ pub mod batch_append; pub mod batch_nullify; +pub mod batch_update_address_tree; +pub mod initialize_batched_address_tree; pub mod initialize_batched_state_tree; pub mod initialize_tree_and_queue; pub mod nullify; pub mod register_program; +pub mod rollover_batch_address_tree; pub mod rollover_batch_state_tree; pub mod rollover_state_tree; pub mod sdk; diff --git a/programs/registry/src/account_compression_cpi/rollover_batch_address_tree.rs b/programs/registry/src/account_compression_cpi/rollover_batch_address_tree.rs new file mode 100644 index 0000000000..a4898ce91c --- /dev/null +++ b/programs/registry/src/account_compression_cpi/rollover_batch_address_tree.rs @@ -0,0 +1,54 @@ +use 
crate::protocol_config::state::ProtocolConfigPda; +use crate::ForesterEpochPda; +use account_compression::utils::if_equals_zero_u64; +use account_compression::{program::AccountCompression, utils::constants::CPI_AUTHORITY_PDA_SEED}; +use anchor_lang::prelude::*; + +#[derive(Accounts)] +pub struct RolloverBatchAddressMerkleTree<'info> { + /// CHECK: only eligible foresters can nullify leaves. Is checked in ix. + #[account(mut)] + pub registered_forester_pda: Option>, + #[account(mut)] + pub authority: Signer<'info>, + /// CHECK: initializated in account compression program. + #[account(zero)] + pub new_address_merkle_tree: AccountInfo<'info>, + /// CHECK: in account compression program. + #[account(mut)] + pub old_address_merkle_tree: AccountInfo<'info>, + /// CHECK: (account compression program) access control. + pub registered_program_pda: AccountInfo<'info>, + /// CHECK: (seed constraints) used to invoke account compression program via cpi. + #[account(mut, seeds = [CPI_AUTHORITY_PDA_SEED], bump)] + pub cpi_authority: AccountInfo<'info>, + pub account_compression_program: Program<'info, AccountCompression>, + pub protocol_config_pda: Account<'info, ProtocolConfigPda>, +} + +pub fn process_rollover_batch_address_merkle_tree( + ctx: &Context, + bump: u8, +) -> Result<()> { + let bump = &[bump]; + let seeds = [CPI_AUTHORITY_PDA_SEED, bump]; + let signer_seeds = &[&seeds[..]]; + let accounts = account_compression::cpi::accounts::RolloverBatchAddressMerkleTree { + fee_payer: ctx.accounts.authority.to_account_info(), + authority: ctx.accounts.cpi_authority.to_account_info(), + old_address_merkle_tree: ctx.accounts.old_address_merkle_tree.to_account_info(), + new_address_merkle_tree: ctx.accounts.new_address_merkle_tree.to_account_info(), + registered_program_pda: Some(ctx.accounts.registered_program_pda.clone()), + }; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.account_compression_program.to_account_info(), + accounts, + signer_seeds, + ); + + 
account_compression::cpi::rollover_batch_address_merkle_tree( + cpi_ctx, + if_equals_zero_u64(ctx.accounts.protocol_config_pda.config.network_fee), + ) +} diff --git a/programs/registry/src/account_compression_cpi/sdk.rs b/programs/registry/src/account_compression_cpi/sdk.rs index 10e1bcce6d..2473fc3b55 100644 --- a/programs/registry/src/account_compression_cpi/sdk.rs +++ b/programs/registry/src/account_compression_cpi/sdk.rs @@ -5,8 +5,8 @@ use crate::utils::{ use account_compression::utils::constants::NOOP_PUBKEY; use account_compression::{ - AddressMerkleTreeConfig, AddressQueueConfig, InitStateTreeAccountsInstructionData, - NullifierQueueConfig, StateMerkleTreeConfig, + AddressMerkleTreeConfig, AddressQueueConfig, InitAddressTreeAccountsInstructionData, + InitStateTreeAccountsInstructionData, NullifierQueueConfig, StateMerkleTreeConfig, }; use anchor_lang::prelude::*; use anchor_lang::InstructionData; @@ -413,3 +413,85 @@ pub fn create_rollover_batch_state_tree_instruction( data: instruction_data.data(), } } + +pub fn create_initialize_batched_address_merkle_tree_instruction( + payer: Pubkey, + merkle_tree_pubkey: Pubkey, + params: InitAddressTreeAccountsInstructionData, +) -> Instruction { + let register_program_pda = get_registered_program_pda(&crate::ID); + let (cpi_authority, bump) = get_cpi_authority_pda(); + + let instruction_data = crate::instruction::InitializeBatchedAddressMerkleTree { bump, params }; + let protocol_config_pda = get_protocol_config_pda_address().0; + let accounts = crate::accounts::InitializeBatchedAddressTree { + authority: payer, + registered_program_pda: register_program_pda, + merkle_tree: merkle_tree_pubkey, + cpi_authority, + account_compression_program: account_compression::ID, + protocol_config_pda, + }; + Instruction { + program_id: crate::ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + } +} + +pub fn create_batch_update_address_tree_instruction( + forester: Pubkey, + 
derivation_pubkey: Pubkey, + merkle_tree_pubkey: Pubkey, + epoch: u64, + data: Vec, +) -> Instruction { + let forester_epoch_pda = get_forester_epoch_pda_from_authority(&derivation_pubkey, epoch).0; + let registered_program_pda = get_registered_program_pda(&crate::ID); + + let (cpi_authority_pda, bump) = get_cpi_authority_pda(); + let accounts = crate::accounts::BatchUpdateAddressTree { + authority: forester, + merkle_tree: merkle_tree_pubkey, + cpi_authority: cpi_authority_pda, + registered_forester_pda: Some(forester_epoch_pda), + registered_program_pda, + account_compression_program: account_compression::ID, + log_wrapper: NOOP_PUBKEY.into(), + }; + let instruction_data = crate::instruction::BatchUpdateAddressTree { bump, data }; + Instruction { + program_id: crate::ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + } +} + +pub fn create_rollover_batch_address_tree_instruction( + forester: Pubkey, + derivation_pubkey: Pubkey, + old_merkle_tree: Pubkey, + new_merkle_tree: Pubkey, + epoch: u64, +) -> Instruction { + let forester_epoch_pda = get_forester_epoch_pda_from_authority(&derivation_pubkey, epoch).0; + let registered_program_pda = get_registered_program_pda(&crate::ID); + + let (cpi_authority_pda, bump) = get_cpi_authority_pda(); + let accounts = crate::accounts::RolloverBatchAddressMerkleTree { + authority: forester, + new_address_merkle_tree: new_merkle_tree, + old_address_merkle_tree: old_merkle_tree, + cpi_authority: cpi_authority_pda, + registered_forester_pda: Some(forester_epoch_pda), + registered_program_pda, + account_compression_program: account_compression::ID, + protocol_config_pda: get_protocol_config_pda_address().0, + }; + let instruction_data = crate::instruction::RolloverBatchAddressMerkleTree { bump }; + Instruction { + program_id: crate::ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + } +} diff --git a/programs/registry/src/lib.rs 
b/programs/registry/src/lib.rs index 8b5ce025c6..cd3a3c6ca2 100644 --- a/programs/registry/src/lib.rs +++ b/programs/registry/src/lib.rs @@ -8,9 +8,10 @@ pub mod account_compression_cpi; pub mod errors; pub use crate::epoch::{finalize_registration::*, register_epoch::*, report_work::*}; pub use account_compression_cpi::{ - batch_append::*, batch_nullify::*, initialize_batched_state_tree::*, - initialize_tree_and_queue::*, nullify::*, register_program::*, rollover_batch_state_tree::*, - rollover_state_tree::*, update_address_tree::*, + batch_append::*, batch_nullify::*, batch_update_address_tree::*, + initialize_batched_address_tree::*, initialize_batched_state_tree::*, + initialize_tree_and_queue::*, nullify::*, register_program::*, rollover_batch_address_tree::*, + rollover_batch_state_tree::*, rollover_state_tree::*, update_address_tree::*, }; pub use protocol_config::{initialize::*, update::*}; @@ -22,7 +23,9 @@ pub mod utils; use account_compression::MerkleTreeMetadata; pub use selection::forester::*; -use account_compression::InitStateTreeAccountsInstructionData; +use account_compression::{ + InitAddressTreeAccountsInstructionData, InitStateTreeAccountsInstructionData, +}; use anchor_lang::solana_program::pubkey::Pubkey; use errors::RegistryError; use protocol_config::state::ProtocolConfig; @@ -539,6 +542,63 @@ pub mod light_registry { process_batch_append(&ctx, bump, data) } + pub fn initialize_batched_address_merkle_tree( + ctx: Context, + bump: u8, + params: InitAddressTreeAccountsInstructionData, + ) -> Result<()> { + if let Some(network_fee) = params.network_fee { + if network_fee != ctx.accounts.protocol_config_pda.config.network_fee { + return err!(RegistryError::InvalidNetworkFee); + } + if params.forester.is_some() { + msg!("Forester pubkey must not be defined for trees serviced by light foresters."); + return err!(RegistryError::ForesterDefined); + } + } else if params.forester.is_none() { + msg!("Forester pubkey required for trees without a network 
fee."); + msg!("Trees without a network fee will not be serviced by light foresters."); + return err!(RegistryError::ForesterUndefined); + } + process_initialize_batched_address_merkle_tree(&ctx, bump, params) + } + + pub fn batch_update_address_tree<'info>( + ctx: Context<'_, '_, '_, 'info, BatchUpdateAddressTree<'info>>, + bump: u8, + data: Vec, + ) -> Result<()> { + { + let account = ctx.accounts.merkle_tree.load()?; + let metadata = account.metadata; + check_forester( + &metadata, + ctx.accounts.authority.key(), + ctx.accounts.merkle_tree.key(), + &mut ctx.accounts.registered_forester_pda, + account.queue.batch_size, + )?; + } + process_batch_update_address_tree(&ctx, bump, data) + } + + pub fn rollover_batch_address_merkle_tree<'info>( + ctx: Context<'_, '_, '_, 'info, RolloverBatchAddressMerkleTree<'info>>, + bump: u8, + ) -> Result<()> { + let account = ZeroCopyBatchedMerkleTreeAccount::address_tree_from_account_info_mut( + &ctx.accounts.old_address_merkle_tree, + )?; + check_forester( + &account.get_account().metadata, + ctx.accounts.authority.key(), + ctx.accounts.old_address_merkle_tree.key(), + &mut ctx.accounts.registered_forester_pda, + DEFAULT_WORK_V1, + )?; + process_rollover_batch_address_merkle_tree(&ctx, bump) + } + pub fn rollover_batch_state_merkle_tree<'info>( ctx: Context<'_, '_, '_, 'info, RolloverBatchStateMerkleTree<'info>>, bump: u8, diff --git a/programs/system/src/errors.rs b/programs/system/src/errors.rs index c4d7e0dc77..b1487faa3d 100644 --- a/programs/system/src/errors.rs +++ b/programs/system/src/errors.rs @@ -68,4 +68,6 @@ pub enum SystemProgramError { OutputMerkleTreeIndicesNotInOrder, OutputMerkleTreeNotUnique, DataFieldUndefined, + ReadOnlyAddressAlreadyExists, + ReadOnlyAccountDoesNotExist, } diff --git a/programs/system/src/invoke/address.rs b/programs/system/src/invoke/address.rs index 75c341302b..cec00e6aef 100644 --- a/programs/system/src/invoke/address.rs +++ b/programs/system/src/invoke/address.rs @@ -1,37 +1,74 @@ -use 
account_compression::utils::constants::CPI_AUTHORITY_PDA_SEED; -use anchor_lang::{prelude::*, Bumps}; +use account_compression::{ + batched_merkle_tree::BatchedMerkleTreeAccount, errors::AccountCompressionErrorCode, + utils::constants::CPI_AUTHORITY_PDA_SEED, AddressMerkleTreeAccount, +}; +use anchor_lang::{prelude::*, Bumps, Discriminator}; use crate::{ constants::CPI_AUTHORITY_PDA_BUMP, + errors::SystemProgramError, invoke_cpi::verify_signer::check_program_owner_address_merkle_tree, sdk::{ accounts::{InvokeAccounts, SignerAccounts}, - address::derive_address, + address::{derive_address, derive_address_legacy}, }, NewAddressParamsPacked, }; pub fn derive_new_addresses( + invoking_program_id: &Option, new_address_params: &[NewAddressParamsPacked], num_input_compressed_accounts: usize, remaining_accounts: &[AccountInfo], compressed_account_addresses: &mut [Option<[u8; 32]>], - new_addresses: &mut [[u8; 32]], + new_addresses: &mut Vec<[u8; 32]>, ) -> Result<()> { + let invoking_program_id_bytes = if let Some(invoking_program_id) = invoking_program_id { + invoking_program_id.to_bytes() + } else { + [0u8; 32] + }; + new_address_params .iter() .enumerate() .try_for_each(|(i, new_address_params)| { - let address = derive_address( + let mut discriminator_bytes = [0u8; 8]; + discriminator_bytes.copy_from_slice( &remaining_accounts[new_address_params.address_merkle_tree_account_index as usize] - .key(), - &new_address_params.seed, - ) - .map_err(ProgramError::from)?; + .try_borrow_data()?[0..8], + ); + let address = match discriminator_bytes { + AddressMerkleTreeAccount::DISCRIMINATOR => derive_address_legacy( + &remaining_accounts + [new_address_params.address_merkle_tree_account_index as usize] + .key(), + &new_address_params.seed, + ) + .map_err(ProgramError::from)?, + BatchedMerkleTreeAccount::DISCRIMINATOR => { + if invoking_program_id.is_none() { + return err!(SystemProgramError::DeriveAddressError); + } + derive_address( + &new_address_params.seed, + 
&remaining_accounts + [new_address_params.address_merkle_tree_account_index as usize] + .key() + .to_bytes(), + &invoking_program_id_bytes, + ) + } + _ => { + return err!( + AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch + ) + } + }; // We are inserting addresses into two vectors to avoid unwrapping // the option in following functions. compressed_account_addresses[i + num_input_compressed_accounts] = Some(address); - new_addresses[i] = address; + new_addresses.push(address); Ok(()) }) } diff --git a/programs/system/src/invoke/emit_event.rs b/programs/system/src/invoke/emit_event.rs index d2b34e3821..480b755bb0 100644 --- a/programs/system/src/invoke/emit_event.rs +++ b/programs/system/src/invoke/emit_event.rs @@ -13,21 +13,11 @@ use crate::{ pub fn emit_state_transition_event<'a, 'b, 'c: 'info, 'info, A: InvokeAccounts<'info> + Bumps>( inputs: InstructionDataInvoke, ctx: &'a Context<'a, 'b, 'c, 'info, A>, - mut input_compressed_account_hashes: Vec<[u8; 32]>, + input_compressed_account_hashes: Vec<[u8; 32]>, output_compressed_account_hashes: Vec<[u8; 32]>, output_leaf_indices: Vec, sequence_numbers: Vec, ) -> Result<()> { - // Do not include read-only accounts in the event. - for (i, account) in inputs - .input_compressed_accounts_with_merkle_context - .iter() - .enumerate() - { - if account.read_only { - input_compressed_account_hashes.remove(i); - } - } // Note: message is unimplemented // (if we compute the tx hash in indexer we don't need to modify the event.) 
let event = PublicTransactionEvent { diff --git a/programs/system/src/invoke/instruction.rs b/programs/system/src/invoke/instruction.rs index 30e49b59cf..a565fbecfe 100644 --- a/programs/system/src/invoke/instruction.rs +++ b/programs/system/src/invoke/instruction.rs @@ -127,3 +127,17 @@ pub struct NewAddressParams { pub address_merkle_tree_pubkey: Pubkey, pub address_merkle_tree_root_index: u16, } + +#[derive(Debug, PartialEq, Default, Clone, Copy, AnchorSerialize, AnchorDeserialize)] +pub struct PackedReadOnlyAddress { + pub address: [u8; 32], + pub address_merkle_tree_account_index: u8, + pub address_merkle_tree_root_index: u16, +} + +#[derive(Debug, PartialEq, Default, Clone, AnchorSerialize, AnchorDeserialize)] +pub struct ReadOnlyAddress { + pub address: [u8; 32], + pub address_merkle_tree_pubkey: Pubkey, + pub address_merkle_tree_root_index: u16, +} diff --git a/programs/system/src/invoke/nullify_state.rs b/programs/system/src/invoke/nullify_state.rs index c8e6b4bc9c..cc5e39f3b6 100644 --- a/programs/system/src/invoke/nullify_state.rs +++ b/programs/system/src/invoke/nullify_state.rs @@ -57,10 +57,6 @@ pub fn insert_nullifiers< // used. let mut network_fee_bundle = None; for account in input_compressed_accounts_with_merkle_context.iter() { - // Don't nullify read-only accounts. 
- if account.read_only { - continue; - } leaf_indices.push(account.merkle_context.leaf_index); let account_info = diff --git a/programs/system/src/invoke/processor.rs b/programs/system/src/invoke/processor.rs index a8cb8e6730..46005a0c1e 100644 --- a/programs/system/src/invoke/processor.rs +++ b/programs/system/src/invoke/processor.rs @@ -14,13 +14,20 @@ use crate::{ sum_check::sum_check, verify_state_proof::{ create_tx_hash, fetch_input_compressed_account_roots, fetch_roots_address_merkle_tree, - hash_input_compressed_accounts, verify_state_proof, + hash_input_compressed_accounts, verify_input_accounts_proof_by_index, + verify_read_only_account_inclusion, verify_read_only_address_queue_non_inclusion, + verify_state_proof, }, }, - sdk::accounts::{InvokeAccounts, SignerAccounts}, + sdk::{ + accounts::{InvokeAccounts, SignerAccounts}, + compressed_account::PackedReadOnlyCompressedAccount, + }, InstructionDataInvoke, }; +use super::PackedReadOnlyAddress; + // TODO: remove once upgraded to anchor 0.30.0 (right now it's required for idl generation) #[derive(Debug, Clone, PartialEq, Eq, AnchorSerialize, AnchorDeserialize)] pub struct CompressedProof { @@ -57,13 +64,15 @@ pub fn process< invoking_program: Option, ctx: Context<'a, 'b, 'c, 'info, A>, cpi_context_inputs: usize, + read_only_addresses: Option>, + read_only_accounts: Option>, ) -> Result<()> { if inputs.relay_fee.is_some() { unimplemented!("Relay fee is not implemented yet."); } // Sum check --------------------------------------------------- bench_sbf_start!("cpda_sum_check"); - let (num_read_only_input_accounts, num_prove_by_index_input_accounts) = sum_check( + let num_prove_by_index_input_accounts = sum_check( &inputs.input_compressed_accounts_with_merkle_context, &inputs.output_compressed_accounts, &inputs.relay_fee, @@ -103,9 +112,11 @@ pub fn process< let mut hashed_pubkeys = Vec::<(Pubkey, [u8; 32])>::with_capacity(hashed_pubkeys_capacity); // Verify state and or address proof 
--------------------------------------------------- - + let read_only_addresses = read_only_addresses.unwrap_or_default(); + let num_of_readonly_addresses = read_only_addresses.len(); // Allocate heap memory here because roots are only used for proof verification. - let mut new_address_roots = vec![[0u8; 32]; num_new_addresses]; + let mut new_address_roots = Vec::with_capacity(num_new_addresses + num_of_readonly_addresses); + let mut new_addresses = Vec::with_capacity(num_new_addresses + num_of_readonly_addresses); // hash input compressed accounts --------------------------------------------------- bench_sbf_start!("cpda_hash_input_compressed_accounts"); @@ -133,19 +144,16 @@ pub fn process< } bench_sbf_end!("cpda_hash_input_compressed_accounts"); - let mut new_addresses = vec![[0u8; 32]; num_new_addresses]; // Insert addresses into address merkle tree queue --------------------------------------------------- - let address_network_fee_bundle = if !new_addresses.is_empty() { + let address_network_fee_bundle = if num_new_addresses != 0 { derive_new_addresses( + &invoking_program, &inputs.new_address_params, num_input_compressed_accounts, ctx.remaining_accounts, &mut compressed_account_addresses, &mut new_addresses, - // TODO: add readonly addresses here )?; - fetch_roots_address_merkle_tree(&inputs.new_address_params, &ctx, &mut new_address_roots)?; - insert_addresses_into_address_merkle_tree_queue( &ctx, &new_addresses, @@ -198,15 +206,14 @@ pub fn process< // in certain cases we zero out roots in batched input queues. // These roots need to be zero prior to proof verification. 
bench_sbf_start!("cpda_nullifiers"); - let input_network_fee_bundle = if num_input_compressed_accounts > num_read_only_input_accounts { + let input_network_fee_bundle = if num_input_compressed_accounts != 0 { // Access the current slot let current_slot = Clock::get()?.slot; let tx_hash = create_tx_hash( - &inputs.input_compressed_accounts_with_merkle_context, &input_compressed_account_hashes, &output_compressed_account_hashes, current_slot, - ); + )?; // Insert nullifiers for compressed input account hashes into nullifier // queue except read-only accounts. insert_nullifiers( @@ -229,8 +236,22 @@ pub fn process< output_network_fee_bundle, )?; + // Verify that all instances of queue_index.is_some() are plausible. + if num_prove_by_index_input_accounts != 0 { + verify_input_accounts_proof_by_index( + ctx.remaining_accounts, + &inputs.input_compressed_accounts_with_merkle_context, + )?; + } + + // Proof inputs order: + // 1. input compressed accounts + // 2. read only compressed accounts + // 3. new addresses + // 4. 
read only addresses if num_prove_by_index_input_accounts < num_input_compressed_accounts - || !inputs.new_address_params.is_empty() + || new_address_roots.capacity() != 0 + || read_only_accounts.as_ref().map_or(false, |x| !x.is_empty()) { bench_sbf_start!("cpda_verify_state_proof"); if let Some(proof) = inputs.proof.as_ref() { @@ -240,6 +261,7 @@ pub fn process< b: proof.b, c: proof.c, }; + let mut input_compressed_account_roots = Vec::with_capacity(num_input_compressed_accounts); fetch_input_compressed_account_roots( @@ -247,12 +269,37 @@ pub fn process< &ctx, &mut input_compressed_account_roots, )?; + let read_only_accounts = read_only_accounts.unwrap_or_default(); + let mut read_only_accounts_roots = Vec::with_capacity(read_only_accounts.len()); + fetch_input_compressed_account_roots( + &read_only_accounts, + &ctx, + &mut read_only_accounts_roots, + )?; + verify_read_only_account_inclusion(ctx.remaining_accounts, &read_only_accounts)?; + + fetch_roots_address_merkle_tree( + &inputs.new_address_params, + &read_only_addresses, + &ctx, + &mut new_address_roots, + )?; + verify_read_only_address_queue_non_inclusion( + ctx.remaining_accounts, + &read_only_addresses, + )?; + + for read_only_address in read_only_addresses.iter() { + new_addresses.push(read_only_address.address); + } match verify_state_proof( &inputs.input_compressed_accounts_with_merkle_context, - &input_compressed_account_roots, + input_compressed_account_roots, &input_compressed_account_hashes, &new_address_roots, &new_addresses, + &read_only_accounts, + &read_only_accounts_roots, &compressed_verifier_proof, ) { Ok(_) => Ok(()), @@ -262,11 +309,13 @@ pub fn process< "input_compressed_account_hashes {:?}", input_compressed_account_hashes ); - msg!("input roots {:?}", input_compressed_account_roots); + // msg!("input roots {:?}", input_compressed_account_roots); msg!( "input_compressed_accounts_with_merkle_context: {:?}", inputs.input_compressed_accounts_with_merkle_context ); + msg!("new_address_roots 
{:?}", new_address_roots); + msg!("new_addresses {:?}", new_addresses); Err(e) } }?; diff --git a/programs/system/src/invoke/sum_check.rs b/programs/system/src/invoke/sum_check.rs index ca22346eb1..22856b57d3 100644 --- a/programs/system/src/invoke/sum_check.rs +++ b/programs/system/src/invoke/sum_check.rs @@ -14,9 +14,8 @@ pub fn sum_check( relay_fee: &Option, compress_or_decompress_lamports: &Option, is_compress: &bool, -) -> Result<(usize, usize)> { +) -> Result { let mut sum: u64 = 0; - let num_read_only = 0; let mut num_prove_by_index_accounts = 0; for compressed_account_with_context in input_compressed_accounts_with_merkle_context.iter() { if compressed_account_with_context @@ -26,12 +25,11 @@ pub fn sum_check( { num_prove_by_index_accounts += 1; } - // Readonly accounts are not included in the sum check, since these are - // not invalidated in this transaction. + // Readonly accounts are only supported as separate inputs. if compressed_account_with_context.read_only { - unimplemented!("read_only accounts are not supported. Set read_only to false."); - // num_read_only += 1; - // continue; + unimplemented!( + "Read accounts are only supported as separate inputs in the invoke_cpi_with_read_only instruction. Set read_only to false." 
+ ); } sum = sum .checked_add(compressed_account_with_context.compressed_account.lamports) @@ -71,7 +69,7 @@ pub fn sum_check( } if sum == 0 { - Ok((num_read_only, num_prove_by_index_accounts)) + Ok(num_prove_by_index_accounts) } else { Err(SystemProgramError::SumCheckFailed.into()) } @@ -82,7 +80,7 @@ mod test { use solana_sdk::{signature::Keypair, signer::Signer}; use super::*; - use crate::sdk::compressed_account::{CompressedAccount, PackedMerkleContext}; + use crate::sdk::compressed_account::{CompressedAccount, PackedMerkleContext, QueueIndex}; #[test] fn test_sum_check() { @@ -130,17 +128,41 @@ mod test { // FAIL: relay fee sum_check_test(&[100, 50], &[2125], Some(25 - 1), None, false).unwrap_err(); sum_check_test(&[100, 50], &[2125], Some(25 + 1), None, false).unwrap_err(); + for i in 0..10 { + sum_check_test_with_num(&vec![150; i], &vec![150; i], None, None, false, i).unwrap(); + } } - fn sum_check_test( input_amounts: &[u64], output_amounts: &[u64], relay_fee: Option, compress_or_decompress_lamports: Option, is_compress: bool, - ) -> Result<(usize, usize)> { + ) -> Result<()> { + sum_check_test_with_num( + input_amounts, + output_amounts, + relay_fee, + compress_or_decompress_lamports, + is_compress, + 0, + ) + } + fn sum_check_test_with_num( + input_amounts: &[u64], + output_amounts: &[u64], + relay_fee: Option, + compress_or_decompress_lamports: Option, + is_compress: bool, + num_by_index: usize, + ) -> Result<()> { let mut inputs = Vec::new(); - for i in input_amounts.iter() { + for (index, i) in input_amounts.iter().enumerate() { + let queue_index = if index < num_by_index { + Some(QueueIndex::default()) + } else { + None + }; inputs.push(PackedCompressedAccountWithMerkleContext { compressed_account: CompressedAccount { owner: Keypair::new().pubkey(), @@ -152,7 +174,7 @@ mod test { merkle_tree_pubkey_index: 0, nullifier_queue_pubkey_index: 0, leaf_index: 0, - queue_index: None, + queue_index, }, root_index: 1, read_only: false, @@ -171,12 +193,15 @@ mod 
test { }); } - sum_check( + let calc_num_prove_by_index_accounts = sum_check( &inputs, &outputs, &relay_fee, &compress_or_decompress_lamports, &is_compress, - ) + )?; + + assert_eq!(num_by_index, calc_num_prove_by_index_accounts); + Ok(()) } } diff --git a/programs/system/src/invoke/verify_state_proof.rs b/programs/system/src/invoke/verify_state_proof.rs index 41f93b53fc..87c401e839 100644 --- a/programs/system/src/invoke/verify_state_proof.rs +++ b/programs/system/src/invoke/verify_state_proof.rs @@ -1,10 +1,19 @@ use crate::{ - sdk::{accounts::InvokeAccounts, compressed_account::PackedCompressedAccountWithMerkleContext}, + errors::SystemProgramError, + sdk::{ + accounts::InvokeAccounts, + compressed_account::{ + FetchRoot, PackedCompressedAccountWithMerkleContext, PackedReadOnlyCompressedAccount, + }, + }, NewAddressParamsPacked, }; use account_compression::{ - batched_merkle_tree::{BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount}, - utils::check_discrimininator::check_discriminator, + batched_merkle_tree::{ + create_hash_chain_from_slice, BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount, + }, + batched_queue::ZeroCopyBatchedQueueAccount, + errors::AccountCompressionErrorCode, AddressMerkleTreeAccount, StateMerkleTreeAccount, }; use anchor_lang::{prelude::*, Bumps, Discriminator}; @@ -19,7 +28,8 @@ use light_verifier::{ }; use std::mem; -// TODO: add support for batched Merkle trees +use super::PackedReadOnlyAddress; + #[inline(never)] #[heap_neutral] pub fn fetch_input_compressed_account_roots< @@ -28,8 +38,9 @@ pub fn fetch_input_compressed_account_roots< 'c: 'info, 'info, A: InvokeAccounts<'info> + Bumps, + F: FetchRoot, >( - input_compressed_accounts_with_merkle_context: &'a [PackedCompressedAccountWithMerkleContext], + input_compressed_accounts_with_merkle_context: &'a [F], ctx: &'a Context<'a, 'b, 'c, 'info, A>, roots: &'a mut Vec<[u8; 32]>, ) -> Result<()> { @@ -38,20 +49,21 @@ pub fn fetch_input_compressed_account_roots< { // Skip 
accounts which prove inclusion by index in output queue. if input_compressed_account_with_context - .merkle_context + .get_merkle_context() .queue_index .is_some() { continue; } - let merkle_tree = &ctx.remaining_accounts[input_compressed_account_with_context - .merkle_context - .merkle_tree_pubkey_index as usize]; - let merkle_tree = &mut merkle_tree.try_borrow_mut_data()?; + let merkle_tree_account_info = &ctx.remaining_accounts[input_compressed_account_with_context + .get_merkle_context() + .merkle_tree_pubkey_index + as usize]; let mut discriminator_bytes = [0u8; 8]; - discriminator_bytes.copy_from_slice(&merkle_tree[0..8]); + discriminator_bytes.copy_from_slice(&merkle_tree_account_info.try_borrow_data()?[0..8]); match discriminator_bytes { StateMerkleTreeAccount::DISCRIMINATOR => { + let merkle_tree = &mut merkle_tree_account_info.try_borrow_mut_data()?; let merkle_tree = ConcurrentMerkleTreeZeroCopy::::from_bytes_zero_copy( &merkle_tree[8 + mem::size_of::()..], @@ -59,19 +71,25 @@ pub fn fetch_input_compressed_account_roots< .map_err(ProgramError::from)?; let fetched_roots = &merkle_tree.roots; - (*roots) - .push(fetched_roots[input_compressed_account_with_context.root_index as usize]); + (*roots).push( + fetched_roots[input_compressed_account_with_context.get_root_index() as usize], + ); } BatchedMerkleTreeAccount::DISCRIMINATOR => { - let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(merkle_tree) + let merkle_tree = + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_account_info_mut( + merkle_tree_account_info, + ) .map_err(ProgramError::from)?; (*roots).push( merkle_tree.root_history - [input_compressed_account_with_context.root_index as usize], + [input_compressed_account_with_context.get_root_index() as usize], ); } _ => { - return err!(crate::ErrorCode::AccountDiscriminatorMismatch); + return err!( + AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch + ); } } } @@ -88,23 +106,182 @@ pub fn 
fetch_roots_address_merkle_tree< A: InvokeAccounts<'info> + Bumps, >( new_address_params: &'a [NewAddressParamsPacked], + read_only_addresses: &'a [PackedReadOnlyAddress], ctx: &'a Context<'a, 'b, 'c, 'info, A>, - roots: &'a mut [[u8; 32]], + roots: &'a mut Vec<[u8; 32]>, +) -> Result<()> { + for new_address_param in new_address_params.iter() { + fetch_address_root::( + ctx, + new_address_param.address_merkle_tree_account_index, + new_address_param.address_merkle_tree_root_index, + roots, + )?; + } + for read_only_address in read_only_addresses.iter() { + fetch_address_root::( + ctx, + read_only_address.address_merkle_tree_account_index, + read_only_address.address_merkle_tree_root_index, + roots, + )?; + } + Ok(()) +} + +/// For each input account which is marked to be proven by index +/// 1. check that it can exist in the output queue +/// - note the output queue checks whether the value acutally exists in the queue +/// - the purpose of this check is to catch marked input accounts which shouldn't be proven by index +#[inline(always)] +pub fn verify_input_accounts_proof_by_index<'a>( + remaining_accounts: &'a [AccountInfo<'_>], + input_accounts: &'a [PackedCompressedAccountWithMerkleContext], +) -> Result<()> { + for account in input_accounts.iter() { + if account.merkle_context.queue_index.is_some() { + let output_queue_account_info = + &remaining_accounts[account.merkle_context.nullifier_queue_pubkey_index as usize]; + let output_queue = + &mut ZeroCopyBatchedQueueAccount::output_queue_from_account_info_mut( + output_queue_account_info, + ) + .map_err(ProgramError::from)?; + output_queue.could_exist_in_batches(account.merkle_context.leaf_index as u64)?; + } + } + Ok(()) +} + +/// For each read-only account +/// 1. prove inclusion by index in the output queue if leaf index should exist in the output queue. +/// 1.1. if proved inclusion by index, return Ok. +/// 2. prove non-inclusion in the bloom filters +/// 2.1. skip wiped batches. +/// 2.2. 
prove non-inclusion in the bloom filters for each batch. +#[inline(always)] +pub fn verify_read_only_account_inclusion<'a>( + remaining_accounts: &'a [AccountInfo<'_>], + read_only_accounts: &'a [PackedReadOnlyCompressedAccount], ) -> Result<()> { - for (i, new_address_param) in new_address_params.iter().enumerate() { - let merkle_tree = ctx.remaining_accounts - [new_address_param.address_merkle_tree_account_index as usize] - .to_account_info(); - let merkle_tree = merkle_tree.try_borrow_data()?; - check_discriminator::(&merkle_tree)?; + for read_only_account in read_only_accounts.iter() { + let output_queue_account_info = &remaining_accounts[read_only_account + .merkle_context + .nullifier_queue_pubkey_index + as usize]; + let output_queue = &mut ZeroCopyBatchedQueueAccount::output_queue_from_account_info_mut( + output_queue_account_info, + ) + .map_err(ProgramError::from)?; + let proved_inclusion = output_queue + .prove_inclusion_by_index( + read_only_account.merkle_context.leaf_index as u64, + &read_only_account.account_hash, + ) + .map_err(|_| SystemProgramError::ReadOnlyAccountDoesNotExist)?; + if !proved_inclusion && read_only_account.merkle_context.queue_index.is_some() { + msg!("Expected read-only account in the output queue but does not exist."); + return err!(SystemProgramError::ReadOnlyAccountDoesNotExist); + } + // If we prove inclusion by index we do not need to check non-inclusion in bloom filters. 
+ if !proved_inclusion { + let merkle_tree_account_info = &remaining_accounts + [read_only_account.merkle_context.merkle_tree_pubkey_index as usize]; + let merkle_tree = + &mut ZeroCopyBatchedMerkleTreeAccount::state_tree_from_account_info_mut( + merkle_tree_account_info, + ) + .map_err(ProgramError::from)?; + + let num_bloom_filters = merkle_tree.bloom_filter_stores.len(); + for i in 0..num_bloom_filters { + let bloom_filter_store = merkle_tree.bloom_filter_stores[i].as_mut_slice(); + let batch = &merkle_tree.batches[i]; + if !batch.bloom_filter_is_wiped { + batch + .check_non_inclusion(&read_only_account.account_hash, bloom_filter_store) + .map_err(|_| SystemProgramError::ReadOnlyAccountDoesNotExist)?; + } + } + } + } + Ok(()) +} + +#[inline(always)] +pub fn verify_read_only_address_queue_non_inclusion<'a>( + remaining_accounts: &'a [AccountInfo<'_>], + read_only_addresses: &'a [PackedReadOnlyAddress], +) -> Result<()> { + for read_only_address in read_only_addresses.iter() { + let merkle_tree_account_info = + &remaining_accounts[read_only_address.address_merkle_tree_account_index as usize]; let merkle_tree = - IndexedMerkleTreeZeroCopy::::from_bytes_zero_copy( - &merkle_tree[8 + mem::size_of::()..], + &mut ZeroCopyBatchedMerkleTreeAccount::address_tree_from_account_info_mut( + merkle_tree_account_info, ) .map_err(ProgramError::from)?; - let fetched_roots = &merkle_tree.roots; - roots[i] = fetched_roots[new_address_param.address_merkle_tree_root_index as usize]; + let num_bloom_filters = merkle_tree.bloom_filter_stores.len(); + for i in 0..num_bloom_filters { + let bloom_filter_store = merkle_tree.bloom_filter_stores[i].as_mut_slice(); + let batch = &merkle_tree.batches[i]; + match batch.check_non_inclusion(&read_only_address.address, bloom_filter_store) { + Ok(_) => {} + Err(_) => { + return err!(SystemProgramError::ReadOnlyAddressAlreadyExists); + } + } + } + } + Ok(()) +} + +fn fetch_address_root< + 'a, + 'b, + 'c: 'info, + 'info, + const IS_READ_ONLY: bool, + 
A: InvokeAccounts<'info> + Bumps, +>( + ctx: &'a Context<'a, 'b, 'c, 'info, A>, + address_merkle_tree_account_index: u8, + address_merkle_tree_root_index: u16, + roots: &'a mut Vec<[u8; 32]>, +) -> Result<()> { + let merkle_tree_account_info = + &ctx.remaining_accounts[address_merkle_tree_account_index as usize]; + let mut discriminator_bytes = [0u8; 8]; + discriminator_bytes.copy_from_slice(&merkle_tree_account_info.try_borrow_data()?[0..8]); + match discriminator_bytes { + AddressMerkleTreeAccount::DISCRIMINATOR => { + if IS_READ_ONLY { + msg!("Read only addresses are only supported for batch address trees."); + return err!( + AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch + ); + } + let merkle_tree = merkle_tree_account_info.try_borrow_data()?; + let merkle_tree = + IndexedMerkleTreeZeroCopy::::from_bytes_zero_copy( + &merkle_tree[8 + mem::size_of::()..], + ) + .map_err(ProgramError::from)?; + (*roots).push(merkle_tree.roots[address_merkle_tree_root_index as usize]); + } + BatchedMerkleTreeAccount::DISCRIMINATOR => { + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::address_tree_from_account_info_mut( + merkle_tree_account_info, + ) + .map_err(ProgramError::from)?; + (*roots).push(merkle_tree.root_history[address_merkle_tree_root_index as usize]); + } + _ => { + return err!( + AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch + ); + } } Ok(()) } @@ -137,19 +314,15 @@ pub fn hash_input_compressed_accounts<'a, 'b, 'c: 'info, 'info>( .iter() .enumerate() { - // Skip read-only accounts. Read-only accounts are just included in - // proof verification, but since these accounts are not invalidated the - // address and lamports must not be used in sum and address checks. - if !input_compressed_account_with_context.read_only { - // For heap neutrality we cannot allocate new heap memory in this function. 
- match &input_compressed_account_with_context - .compressed_account - .address - { - Some(address) => addresses[j] = Some(*address), - None => {} - }; - } + // For heap neutrality we cannot allocate new heap memory in this function. + match &input_compressed_account_with_context + .compressed_account + .address + { + Some(address) => addresses[j] = Some(*address), + None => {} + }; + #[allow(clippy::comparison_chain)] if current_mt_index != input_compressed_account_with_context @@ -224,17 +397,20 @@ pub fn hash_input_compressed_accounts<'a, 'b, 'c: 'info, 'info>( Ok(()) } +#[allow(clippy::too_many_arguments)] #[heap_neutral] pub fn verify_state_proof( input_compressed_accounts_with_merkle_context: &[PackedCompressedAccountWithMerkleContext], - roots: &[[u8; 32]], + mut roots: Vec<[u8; 32]>, leaves: &[[u8; 32]], address_roots: &[[u8; 32]], addresses: &[[u8; 32]], + read_only_accounts: &[PackedReadOnlyCompressedAccount], + read_only_roots: &[[u8; 32]], compressed_proof: &CompressedProof, ) -> anchor_lang::Result<()> { // Filter out leaves that are not in the proof (proven by index). 
- let proof_input_leaves = leaves + let mut proof_input_leaves = leaves .iter() .enumerate() .filter(|(x, _)| { @@ -245,9 +421,17 @@ pub fn verify_state_proof( }) .map(|x| *x.1) .collect::>(); + + read_only_accounts.iter().for_each(|x| { + if x.merkle_context.queue_index.is_none() { + proof_input_leaves.extend_from_slice(&[x.account_hash]); + } + }); + roots.extend_from_slice(read_only_roots); + if !addresses.is_empty() && !proof_input_leaves.is_empty() { verify_create_addresses_and_merkle_proof_zkp( - roots, + &roots, &proof_input_leaves, address_roots, addresses, @@ -258,69 +442,21 @@ pub fn verify_state_proof( verify_create_addresses_zkp(address_roots, addresses, compressed_proof) .map_err(ProgramError::from)?; } else { - verify_merkle_proof_zkp(roots, &proof_input_leaves, compressed_proof) + verify_merkle_proof_zkp(&roots, &proof_input_leaves, compressed_proof) .map_err(ProgramError::from)?; } Ok(()) } pub fn create_tx_hash( - input_compressed_accounts_with_merkle_context: &[PackedCompressedAccountWithMerkleContext], input_compressed_account_hashes: &[[u8; 32]], output_compressed_account_hashes: &[[u8; 32]], current_slot: u64, -) -> [u8; 32] { - use light_hasher::Hasher; - // Do not include read-only accounts in the event. 
- let index = find_first_non_read_only_account(input_compressed_accounts_with_merkle_context); - // TODO: extend with message hash (first 32 bytes of the message) - let mut tx_hash = input_compressed_account_hashes[index]; - for (i, hash) in input_compressed_account_hashes - .iter() - .skip(index + 1) - .enumerate() - { - if input_compressed_accounts_with_merkle_context[i].read_only { - continue; - } - tx_hash = Poseidon::hashv(&[&tx_hash, hash]).unwrap(); - } - tx_hash = Poseidon::hashv(&[&tx_hash, ¤t_slot.to_be_bytes()]).unwrap(); - for hash in output_compressed_account_hashes.iter() { - tx_hash = Poseidon::hashv(&[&tx_hash, hash]).unwrap(); - } - tx_hash -} - -fn find_first_non_read_only_account( - input_compressed_accounts_with_merkle_context: &[PackedCompressedAccountWithMerkleContext], -) -> usize { - for (i, account) in input_compressed_accounts_with_merkle_context - .iter() - .enumerate() - { - if !account.read_only { - return i; - } - } - 0 -} - -pub fn create_tx_hash_offchain( - input_compressed_account_hashes: &[[u8; 32]], - output_compressed_account_hashes: &[[u8; 32]], - current_slot: u64, -) -> [u8; 32] { - use light_hasher::Hasher; - // Do not include read-only accounts in the event. 
- // TODO: extend with message hash (first 32 bytes of the message) - let mut tx_hash = input_compressed_account_hashes[0]; - for hash in input_compressed_account_hashes.iter().skip(1) { - tx_hash = Poseidon::hashv(&[&tx_hash, hash]).unwrap(); - } - tx_hash = Poseidon::hashv(&[&tx_hash, ¤t_slot.to_be_bytes()]).unwrap(); - for hash in output_compressed_account_hashes.iter() { - tx_hash = Poseidon::hashv(&[&tx_hash, hash]).unwrap(); - } - tx_hash +) -> Result<[u8; 32]> { + let version = [0u8; 32]; + let mut slot_bytes = [0u8; 32]; + slot_bytes[24..].copy_from_slice(¤t_slot.to_be_bytes()); + let inputs_hash_chain = create_hash_chain_from_slice(input_compressed_account_hashes)?; + let outputs_hash_chain = create_hash_chain_from_slice(output_compressed_account_hashes)?; + create_hash_chain_from_slice(&[version, inputs_hash_chain, outputs_hash_chain, slot_bytes]) } diff --git a/programs/system/src/invoke_cpi/instruction.rs b/programs/system/src/invoke_cpi/instruction.rs index ba794441a3..7dae64f1bf 100644 --- a/programs/system/src/invoke_cpi/instruction.rs +++ b/programs/system/src/invoke_cpi/instruction.rs @@ -9,10 +9,12 @@ use crate::{ invoke::{processor::CompressedProof, sol_compression::SOL_POOL_PDA_SEED}, sdk::{ accounts::{InvokeAccounts, SignerAccounts}, - compressed_account::PackedCompressedAccountWithMerkleContext, + compressed_account::{ + PackedCompressedAccountWithMerkleContext, PackedReadOnlyCompressedAccount, + }, CompressedCpiContext, }, - NewAddressParamsPacked, OutputCompressedAccountWithPackedContext, + NewAddressParamsPacked, OutputCompressedAccountWithPackedContext, PackedReadOnlyAddress, }; #[derive(Accounts)] @@ -113,6 +115,13 @@ impl InstructionDataInvokeCpi { } } +#[derive(Debug, PartialEq, Default, Clone, AnchorSerialize, AnchorDeserialize)] +pub struct InstructionDataInvokeCpiWithReadOnly { + pub invoke_cpi: InstructionDataInvokeCpi, + pub read_only_addresses: Option>, + pub read_only_accounts: Option>, +} + #[cfg(test)] mod tests { use std::vec; 
diff --git a/programs/system/src/invoke_cpi/processor.rs b/programs/system/src/invoke_cpi/processor.rs index f95c15a7e2..e59d56e430 100644 --- a/programs/system/src/invoke_cpi/processor.rs +++ b/programs/system/src/invoke_cpi/processor.rs @@ -3,8 +3,10 @@ use light_heap::{bench_sbf_end, bench_sbf_start}; use super::verify_signer::cpi_signer_checks; use crate::{ - invoke::processor::process, invoke_cpi::instruction::InvokeCpiInstruction, - sdk::accounts::SignerAccounts, InstructionDataInvoke, InstructionDataInvokeCpi, + invoke::processor::process, + invoke_cpi::instruction::InvokeCpiInstruction, + sdk::{accounts::SignerAccounts, compressed_account::PackedReadOnlyCompressedAccount}, + InstructionDataInvoke, InstructionDataInvokeCpi, PackedReadOnlyAddress, }; /// Processes an `InvokeCpi` instruction. @@ -15,6 +17,8 @@ use crate::{ pub fn process_invoke_cpi<'a, 'b, 'c: 'info + 'b, 'info>( mut ctx: Context<'a, 'b, 'c, 'info, InvokeCpiInstruction<'info>>, inputs: InstructionDataInvokeCpi, + read_only_addresses: Option>, + read_only_accounts: Option>, ) -> Result<()> { bench_sbf_start!("cpda_cpi_signer_checks"); cpi_signer_checks( @@ -59,5 +63,7 @@ pub fn process_invoke_cpi<'a, 'b, 'c: 'info + 'b, 'info>( Some(ctx.accounts.invoking_program.key()), ctx, cpi_context_inputs_len, + read_only_addresses, + read_only_accounts, ) } diff --git a/programs/system/src/invoke_cpi/verify_signer.rs b/programs/system/src/invoke_cpi/verify_signer.rs index 10792be881..f594fd4e0a 100644 --- a/programs/system/src/invoke_cpi/verify_signer.rs +++ b/programs/system/src/invoke_cpi/verify_signer.rs @@ -1,6 +1,7 @@ use account_compression::{ batched_merkle_tree::{BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount}, batched_queue::{BatchedQueueAccount, ZeroCopyBatchedQueueAccount}, + errors::AccountCompressionErrorCode, utils::constants::CPI_AUTHORITY_PDA_SEED, AddressMerkleTreeAccount, StateMerkleTreeAccount, }; @@ -153,8 +154,10 @@ pub fn check_program_owner_state_merkle_tree<'a, 'b: 'a>( 
) } BatchedMerkleTreeAccount::DISCRIMINATOR => { - let merkle_tree = &mut merkle_tree_acc_info.try_borrow_mut_data()?; - let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(merkle_tree) + let merkle_tree = + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_account_info_mut( + merkle_tree_acc_info, + ) .map_err(ProgramError::from)?; let account = merkle_tree.get_account(); let seq = account.sequence_number + 1; @@ -169,9 +172,10 @@ pub fn check_program_owner_state_merkle_tree<'a, 'b: 'a>( ) } BatchedQueueAccount::DISCRIMINATOR => { - let merkle_tree = &mut merkle_tree_acc_info.try_borrow_mut_data()?; - let merkle_tree = ZeroCopyBatchedQueueAccount::from_bytes_mut(merkle_tree) - .map_err(ProgramError::from)?; + let merkle_tree = ZeroCopyBatchedQueueAccount::output_queue_from_account_info_mut( + merkle_tree_acc_info, + ) + .map_err(ProgramError::from)?; let account = merkle_tree.get_account(); let seq = u64::MAX; let next_index: u32 = account.next_index.try_into().unwrap(); @@ -185,7 +189,9 @@ pub fn check_program_owner_state_merkle_tree<'a, 'b: 'a>( ) } _ => { - return err!(crate::ErrorCode::AccountDiscriminatorMismatch); + return err!( + AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch + ); } } }; @@ -206,7 +212,7 @@ pub fn check_program_owner_state_merkle_tree<'a, 'b: 'a>( invoking_program, program_owner ); - return Err(SystemProgramError::InvalidMerkleTreeOwner.into()); + return err!(SystemProgramError::InvalidMerkleTreeOwner); } Ok((next_index, network_fee, seq, merkle_tree_pubkey)) } @@ -216,21 +222,45 @@ pub fn check_program_owner_address_merkle_tree<'a, 'b: 'a>( merkle_tree_acc_info: &'b AccountInfo<'a>, invoking_program: &Option, ) -> Result> { - let merkle_tree = - AccountLoader::::try_from(merkle_tree_acc_info).unwrap(); - let merkle_tree_unpacked = merkle_tree.load()?; - let network_fee = if merkle_tree_unpacked.metadata.rollover_metadata.network_fee != 0 { - 
Some(merkle_tree_unpacked.metadata.rollover_metadata.network_fee) + let discriminator_bytes = merkle_tree_acc_info.try_borrow_data()?[0..8] + .try_into() + .unwrap(); + + let metadata = match discriminator_bytes { + AddressMerkleTreeAccount::DISCRIMINATOR => { + let merkle_tree = + AccountLoader::::try_from(merkle_tree_acc_info).unwrap(); + let merkle_tree_unpacked = merkle_tree.load()?; + merkle_tree_unpacked.metadata + } + BatchedMerkleTreeAccount::DISCRIMINATOR => { + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::address_tree_from_account_info_mut( + merkle_tree_acc_info, + ) + .map_err(ProgramError::from)?; + let account = merkle_tree.get_account(); + account.metadata + } + _ => { + return err!( + AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch + ); + } + }; + + let network_fee = if metadata.rollover_metadata.network_fee != 0 { + Some(metadata.rollover_metadata.network_fee) } else { None }; - if merkle_tree_unpacked.metadata.access_metadata.program_owner != Pubkey::default() { + + if metadata.access_metadata.program_owner != Pubkey::default() { if let Some(invoking_program) = invoking_program { - if *invoking_program == merkle_tree_unpacked.metadata.access_metadata.program_owner { + if *invoking_program == metadata.access_metadata.program_owner { msg!( "invoking_program.key() {:?} == merkle_tree_unpacked.program_owner {:?}", invoking_program, - merkle_tree_unpacked.metadata.access_metadata.program_owner + metadata.access_metadata.program_owner ); return Ok(network_fee); } @@ -238,7 +268,7 @@ pub fn check_program_owner_address_merkle_tree<'a, 'b: 'a>( msg!( "invoking_program.key() {:?} == merkle_tree_unpacked.program_owner {:?}", invoking_program, - merkle_tree_unpacked.metadata.access_metadata.program_owner + metadata.access_metadata.program_owner ); err!(SystemProgramError::InvalidMerkleTreeOwner) } else { diff --git a/programs/system/src/lib.rs b/programs/system/src/lib.rs index 69d68a3293..57b995f923 100644 --- 
a/programs/system/src/lib.rs +++ b/programs/system/src/lib.rs @@ -27,6 +27,7 @@ use anchor_lang::Discriminator; #[program] pub mod light_system_program { + use account_compression::errors::AccountCompressionErrorCode; use light_heap::{bench_sbf_end, bench_sbf_start}; use self::{ @@ -45,7 +46,7 @@ pub mod light_system_program { StateMerkleTreeAccount::DISCRIMINATOR => Ok(()), BatchedMerkleTreeAccount::DISCRIMINATOR => Ok(()), _ => { - err!(anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch) + err!(AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch) } }?; ctx.accounts @@ -65,7 +66,7 @@ pub mod light_system_program { &inputs.input_compressed_accounts_with_merkle_context, &ctx.accounts.authority.key(), )?; - process(inputs, None, ctx, 0) + process(inputs, None, ctx, 0, None, None) } pub fn invoke_cpi<'a, 'b, 'c: 'info, 'info>( @@ -77,7 +78,30 @@ pub mod light_system_program { InstructionDataInvokeCpi::deserialize(&mut inputs.as_slice())?; bench_sbf_end!("cpda_deserialize"); - process_invoke_cpi(ctx, inputs) + process_invoke_cpi(ctx, inputs, None, None) + } + + pub fn invoke_cpi_with_read_only<'a, 'b, 'c: 'info, 'info>( + ctx: Context<'a, 'b, 'c, 'info, InvokeCpiInstruction<'info>>, + inputs: Vec, + ) -> Result<()> { + bench_sbf_start!("cpda_deserialize"); + let inputs = InstructionDataInvokeCpiWithReadOnly::deserialize(&mut inputs.as_slice())?; + bench_sbf_end!("cpda_deserialize"); + // disable set cpi context because cpi context account uses InvokeCpiInstruction + if let Some(cpi_context) = inputs.invoke_cpi.cpi_context { + if cpi_context.set_context { + msg!("Cannot set cpi context in invoke_cpi_with_read_only."); + msg!("Please use invoke_cpi instead."); + return Err(SystemProgramError::InstructionNotCallable.into()); + } + } + process_invoke_cpi( + ctx, + inputs.invoke_cpi, + inputs.read_only_addresses, + inputs.read_only_accounts, + ) } /// This function is a stub to allow Anchor to include the input types in diff --git 
a/programs/system/src/sdk/address.rs b/programs/system/src/sdk/address.rs index 2df88c281a..2d0a4d7190 100644 --- a/programs/system/src/sdk/address.rs +++ b/programs/system/src/sdk/address.rs @@ -1,10 +1,17 @@ use std::collections::HashMap; use anchor_lang::{err, solana_program::pubkey::Pubkey, Result}; -use light_utils::hash_to_bn254_field_size_be; +use light_utils::{hash_to_bn254_field_size_be, hashv_to_bn254_field_size_be}; -use crate::{errors::SystemProgramError, NewAddressParams, NewAddressParamsPacked}; -pub fn derive_address(merkle_tree_pubkey: &Pubkey, seed: &[u8; 32]) -> Result<[u8; 32]> { +use crate::{ + errors::SystemProgramError, NewAddressParams, NewAddressParamsPacked, PackedReadOnlyAddress, + ReadOnlyAddress, +}; + +use super::compressed_account::{ + pack_merkle_context, PackedReadOnlyCompressedAccount, ReadOnlyCompressedAccount, +}; +pub fn derive_address_legacy(merkle_tree_pubkey: &Pubkey, seed: &[u8; 32]) -> Result<[u8; 32]> { let hash = match hash_to_bn254_field_size_be( [merkle_tree_pubkey.to_bytes(), *seed].concat().as_slice(), ) { @@ -15,6 +22,21 @@ pub fn derive_address(merkle_tree_pubkey: &Pubkey, seed: &[u8; 32]) -> Result<[u Ok(hash) } +pub fn derive_address( + seed: &[u8; 32], + merkle_tree_pubkey: &[u8; 32], + program_id_bytes: &[u8; 32], +) -> [u8; 32] { + hashv_to_bn254_field_size_be( + [ + seed.as_slice(), + merkle_tree_pubkey.as_slice(), + program_id_bytes.as_slice(), + ] + .as_slice(), + ) +} + pub fn add_and_get_remaining_account_indices( pubkeys: &[Pubkey], remaining_accounts: &mut HashMap, @@ -77,6 +99,48 @@ pub fn pack_new_address_params( new_address_params_packed } +pub fn pack_read_only_address_params( + new_address_params: &[ReadOnlyAddress], + remaining_accounts: &mut HashMap, +) -> Vec { + new_address_params + .iter() + .map(|x| PackedReadOnlyAddress { + address: x.address, + address_merkle_tree_root_index: x.address_merkle_tree_root_index, + address_merkle_tree_account_index: pack_account( + &x.address_merkle_tree_pubkey, 
+ remaining_accounts, + ), + }) + .collect::>() +} + +pub fn pack_account(pubkey: &Pubkey, remaining_accounts: &mut HashMap) -> u8 { + match remaining_accounts.get(pubkey) { + Some(index) => *index as u8, + None => { + let next_index = remaining_accounts.len(); + remaining_accounts.insert(*pubkey, next_index); + next_index as u8 + } + } +} + +pub fn pack_read_only_accounts( + accounts: &[ReadOnlyCompressedAccount], + remaining_accounts: &mut HashMap, +) -> Vec { + accounts + .iter() + .map(|x| PackedReadOnlyCompressedAccount { + account_hash: x.account_hash, + merkle_context: pack_merkle_context(&[x.merkle_context], remaining_accounts)[0], + root_index: x.root_index, + }) + .collect::>() +} + #[cfg(test)] mod tests { use solana_sdk::{signature::Keypair, signer::Signer}; @@ -87,8 +151,8 @@ mod tests { fn test_derive_address_with_valid_input() { let merkle_tree_pubkey = Keypair::new().pubkey(); let seeds = [1u8; 32]; - let result = derive_address(&merkle_tree_pubkey, &seeds); - let result_2 = derive_address(&merkle_tree_pubkey, &seeds); + let result = derive_address_legacy(&merkle_tree_pubkey, &seeds); + let result_2 = derive_address_legacy(&merkle_tree_pubkey, &seeds); assert_eq!(result, result_2); } @@ -98,8 +162,8 @@ mod tests { let merkle_tree_pubkey_2 = Keypair::new().pubkey(); let seed = [2u8; 32]; - let result = derive_address(&merkle_tree_pubkey, &seed); - let result_2 = derive_address(&merkle_tree_pubkey_2, &seed); + let result = derive_address_legacy(&merkle_tree_pubkey, &seed); + let result_2 = derive_address_legacy(&merkle_tree_pubkey_2, &seed); assert_ne!(result, result_2); } diff --git a/programs/system/src/sdk/compressed_account.rs b/programs/system/src/sdk/compressed_account.rs index d152e1c07b..fa4b995a7a 100644 --- a/programs/system/src/sdk/compressed_account.rs +++ b/programs/system/src/sdk/compressed_account.rs @@ -4,6 +4,13 @@ use anchor_lang::prelude::*; use light_hasher::{Hasher, Poseidon}; use light_utils::hash_to_bn254_field_size_be; +use 
super::address::pack_account; + +pub trait FetchRoot { + fn get_root_index(&self) -> u16; + fn get_merkle_context(&self) -> PackedMerkleContext; +} + #[derive(Debug, PartialEq, Default, Clone, AnchorSerialize, AnchorDeserialize)] pub struct PackedCompressedAccountWithMerkleContext { pub compressed_account: CompressedAccount, @@ -13,6 +20,14 @@ pub struct PackedCompressedAccountWithMerkleContext { /// Placeholder to mark accounts read-only unimplemented set to false. pub read_only: bool, } +impl FetchRoot for PackedCompressedAccountWithMerkleContext { + fn get_root_index(&self) -> u16 { + self.root_index + } + fn get_merkle_context(&self) -> PackedMerkleContext { + self.merkle_context + } +} #[derive(Debug, PartialEq, Default, Clone, AnchorSerialize, AnchorDeserialize)] pub struct CompressedAccountWithMerkleContext { @@ -28,6 +43,73 @@ impl CompressedAccountWithMerkleContext { } } +impl CompressedAccountWithMerkleContext { + pub fn into_read_only(&self, root_index: Option) -> Result { + let account_hash = self.hash()?; + let merkle_context = if root_index.is_none() { + let mut merkle_context = self.merkle_context; + merkle_context.queue_index = Some(QueueIndex::default()); + merkle_context + } else { + self.merkle_context + }; + Ok(ReadOnlyCompressedAccount { + account_hash, + merkle_context, + root_index: root_index.unwrap_or_default(), + }) + } + + pub fn pack( + &self, + root_index: Option, + remaining_accounts: &mut HashMap, + ) -> Result { + Ok(PackedCompressedAccountWithMerkleContext { + compressed_account: self.compressed_account.clone(), + merkle_context: PackedMerkleContext { + merkle_tree_pubkey_index: pack_account( + &self.merkle_context.merkle_tree_pubkey, + remaining_accounts, + ), + nullifier_queue_pubkey_index: pack_account( + &self.merkle_context.nullifier_queue_pubkey, + remaining_accounts, + ), + leaf_index: self.merkle_context.leaf_index, + queue_index: if root_index.is_none() { + Some(QueueIndex::default()) + } else { + None + }, + }, + 
root_index: root_index.unwrap_or_default(), + read_only: false, + }) + } +} +#[derive(Debug, PartialEq, Default, Clone, AnchorSerialize, AnchorDeserialize)] +pub struct ReadOnlyCompressedAccount { + pub account_hash: [u8; 32], + pub merkle_context: MerkleContext, + pub root_index: u16, +} + +#[derive(Debug, PartialEq, Default, Clone, AnchorSerialize, AnchorDeserialize)] +pub struct PackedReadOnlyCompressedAccount { + pub account_hash: [u8; 32], + pub merkle_context: PackedMerkleContext, + pub root_index: u16, +} +impl FetchRoot for PackedReadOnlyCompressedAccount { + fn get_root_index(&self) -> u16 { + self.root_index + } + fn get_merkle_context(&self) -> PackedMerkleContext { + self.merkle_context + } +} + #[derive(Debug, Clone, Copy, AnchorSerialize, AnchorDeserialize, PartialEq, Default)] pub struct MerkleContext { pub merkle_tree_pubkey: Pubkey, @@ -43,8 +125,6 @@ pub struct PackedMerkleContext { pub merkle_tree_pubkey_index: u8, pub nullifier_queue_pubkey_index: u8, pub leaf_index: u32, - /// Index of leaf in queue. Placeholder of batched Merkle tree updates - /// currently unimplemented. 
pub queue_index: Option, } @@ -60,41 +140,18 @@ pub fn pack_merkle_context( merkle_context: &[MerkleContext], remaining_accounts: &mut HashMap, ) -> Vec { - let mut merkle_context_packed = merkle_context + merkle_context .iter() .map(|x| PackedMerkleContext { leaf_index: x.leaf_index, - merkle_tree_pubkey_index: 0, // will be assigned later - nullifier_queue_pubkey_index: 0, // will be assigned later - queue_index: None, + merkle_tree_pubkey_index: pack_account(&x.merkle_tree_pubkey, remaining_accounts), + nullifier_queue_pubkey_index: pack_account( + &x.nullifier_queue_pubkey, + remaining_accounts, + ), + queue_index: x.queue_index, }) - .collect::>(); - let mut index: usize = remaining_accounts.len(); - for (i, params) in merkle_context.iter().enumerate() { - match remaining_accounts.get(¶ms.merkle_tree_pubkey) { - Some(_) => {} - None => { - remaining_accounts.insert(params.merkle_tree_pubkey, index); - index += 1; - } - }; - merkle_context_packed[i].merkle_tree_pubkey_index = - *remaining_accounts.get(¶ms.merkle_tree_pubkey).unwrap() as u8; - } - - for (i, params) in merkle_context.iter().enumerate() { - match remaining_accounts.get(¶ms.nullifier_queue_pubkey) { - Some(_) => {} - None => { - remaining_accounts.insert(params.nullifier_queue_pubkey, index); - index += 1; - } - }; - merkle_context_packed[i].nullifier_queue_pubkey_index = *remaining_accounts - .get(¶ms.nullifier_queue_pubkey) - .unwrap() as u8; - } - merkle_context_packed + .collect::>() } #[derive(Debug, PartialEq, Default, Clone, AnchorSerialize, AnchorDeserialize)] diff --git a/programs/system/src/sdk/invoke.rs b/programs/system/src/sdk/invoke.rs index adf1d7cf72..418cc3de43 100644 --- a/programs/system/src/sdk/invoke.rs +++ b/programs/system/src/sdk/invoke.rs @@ -56,6 +56,7 @@ pub fn create_invoke_instruction( .sort_by(|a, b| a.merkle_tree_index.cmp(&b.merkle_tree_index)); } let mut inputs = Vec::new(); + println!("inputs_struct: {:?}", inputs_struct); 
InstructionDataInvoke::serialize(&inputs_struct, &mut inputs).unwrap(); diff --git a/test-programs/account-compression-test/Cargo.toml b/test-programs/account-compression-test/Cargo.toml index a1c1c19dd2..c8f7b70e45 100644 --- a/test-programs/account-compression-test/Cargo.toml +++ b/test-programs/account-compression-test/Cargo.toml @@ -50,4 +50,5 @@ serde_json = "1.0.114" solana-sdk = { workspace = true } thiserror = "1.0" memoffset = "0.9.1" -serial_test = "3.1.1" \ No newline at end of file +serial_test = "3.1.1" +light-bloom-filter = {path = "../../merkle-tree/bloom-filter"} \ No newline at end of file diff --git a/test-programs/account-compression-test/tests/address_merkle_tree_tests.rs b/test-programs/account-compression-test/tests/address_merkle_tree_tests.rs index 484b09db75..d38bd16a67 100644 --- a/test-programs/account-compression-test/tests/address_merkle_tree_tests.rs +++ b/test-programs/account-compression-test/tests/address_merkle_tree_tests.rs @@ -1476,6 +1476,7 @@ pub async fn test_setup_with_address_merkle_tree( queue: address_queue_keypair.pubkey(), }, rollover_fee: FeeConfig::default().address_queue_rollover as i64, + queue_elements: vec![], }; (context, payer, address_merkle_tree_bundle) } diff --git a/test-programs/account-compression-test/tests/batched_merkle_tree_test.rs b/test-programs/account-compression-test/tests/batched_merkle_tree_test.rs index 6be6dea0d1..f1d75f2232 100644 --- a/test-programs/account-compression-test/tests/batched_merkle_tree_test.rs +++ b/test-programs/account-compression-test/tests/batched_merkle_tree_test.rs @@ -10,23 +10,33 @@ use account_compression::batched_queue::{ ZeroCopyBatchedQueueAccount, }; use account_compression::errors::AccountCompressionErrorCode; -use account_compression::{assert_mt_zero_copy_inited, get_output_queue_account_default}; +use account_compression::{ + assert_address_mt_zero_copy_inited, assert_state_mt_zero_copy_inited, + get_output_queue_account_default, 
InitAddressTreeAccountsInstructionData, +}; use account_compression::{ batched_merkle_tree::BatchedMerkleTreeAccount, InitStateTreeAccountsInstructionData, ID, }; use anchor_lang::error::ErrorCode; use anchor_lang::prelude::AccountMeta; use anchor_lang::{AnchorSerialize, InstructionData, ToAccountMetas}; +use anchor_spl::token::Mint; use light_prover_client::gnark::helpers::{spawn_prover, ProofType, ProverConfig}; -use light_prover_client::mock_batched_forester::{MockBatchedForester, MockTxEvent}; -use light_system_program::invoke::verify_state_proof::create_tx_hash_offchain; +use light_prover_client::mock_batched_forester::{ + self, MockBatchedAddressForester, MockBatchedForester, MockTxEvent, +}; +use light_system_program::invoke::verify_state_proof::create_tx_hash; +use light_test_utils::address::insert_addresses; +use light_test_utils::spl::create_initialize_mint_instructions; use light_test_utils::test_batch_forester::assert_perform_state_mt_roll_over; use light_test_utils::test_env::NOOP_PROGRAM_ID; use light_test_utils::{ airdrop_lamports, assert_rpc_error, create_account_instruction, RpcConnection, RpcError, }; use light_test_utils::{rpc::ProgramTestRpcConnection, AccountZeroCopy}; -use light_verifier::CompressedProof; +use light_utils::bigint::bigint_to_be_bytes_array; +use light_verifier::{CompressedProof, VerifierError}; +use num_bigint::ToBigUint; use serial_test::serial; use solana_program_test::ProgramTest; use solana_sdk::account::WritableAccount; @@ -168,7 +178,7 @@ async fn test_batch_state_merkle_tree() { params.input_queue_num_batches, ); - assert_mt_zero_copy_inited( + assert_state_mt_zero_copy_inited( &mut merkle_tree.account.data.as_mut_slice(), ref_mt_account, params.bloom_filter_num_iters, @@ -638,7 +648,7 @@ pub async fn perform_insert_into_input_queue( *counter += 1; } let slot = context.get_slot().await.unwrap(); - let tx_hash = create_tx_hash_offchain(&leaves, &vec![], slot); + let tx_hash = create_tx_hash(&leaves, &vec![], 
slot).unwrap(); mock_indexer.tx_events.push(MockTxEvent { tx_hash, inputs: leaves.clone(), @@ -687,7 +697,7 @@ pub async fn create_append_batch_ix_data( output_queue_account_data: &mut [u8], ) -> InstructionDataBatchAppendInputs { let zero_copy_account = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(mt_account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut(mt_account_data).unwrap(); let output_zero_copy_account = ZeroCopyBatchedQueueAccount::from_bytes_mut(output_queue_account_data).unwrap(); @@ -732,7 +742,7 @@ pub async fn create_nullify_batch_ix_data( account_data: &mut [u8], ) -> InstructionDataBatchNullifyInputs { let zero_copy_account: ZeroCopyBatchedMerkleTreeAccount = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data).unwrap(); + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut(account_data).unwrap(); println!("batches {:?}", zero_copy_account.batches); let old_root_index = zero_copy_account.root_history.last_index(); @@ -840,7 +850,7 @@ async fn test_init_batch_state_merkle_trees() { ); let mut tree_data = merkle_tree.account.data.clone(); - assert_mt_zero_copy_inited( + assert_state_mt_zero_copy_inited( &mut tree_data.as_mut_slice(), ref_mt_account, params.bloom_filter_num_iters, @@ -1280,3 +1290,721 @@ pub async fn perform_rollover_batch_state_merkle_tree( ) .await?) 
} + +pub async fn perform_init_batch_state_merkle_tree_and_queue( + context: &mut ProgramTestRpcConnection, + params: &InitStateTreeAccountsInstructionData, + merkle_tree_keypair: &Keypair, + nullifier_queue_keypair: &Keypair, +) -> Result<(u64, Signature), RpcError> { + let payer = context.get_payer().insecure_clone(); + let payer_pubkey = context.get_payer().pubkey(); + let merkle_tree_pubkey = merkle_tree_keypair.pubkey(); + let output_queue_pubkey = nullifier_queue_keypair.pubkey(); + let queue_account_size = get_output_queue_account_size( + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.output_queue_num_batches, + ); + let mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + let queue_rent = context + .get_minimum_balance_for_rent_exemption(queue_account_size) + .await + .unwrap(); + let create_queue_account_ix = create_account_instruction( + &payer_pubkey, + queue_account_size, + queue_rent, + &ID, + Some(&nullifier_queue_keypair), + ); + let mt_rent = context + .get_minimum_balance_for_rent_exemption(mt_account_size) + .await + .unwrap(); + let additional_bytes_rent = context + .get_minimum_balance_for_rent_exemption(params.additional_bytes as usize) + .await + .unwrap(); + let total_rent = queue_rent + mt_rent + additional_bytes_rent; + let create_mt_account_ix = create_account_instruction( + &payer_pubkey, + mt_account_size, + mt_rent, + &ID, + Some(&merkle_tree_keypair), + ); + + let instruction = + account_compression::instruction::InitializeBatchedStateMerkleTree { params: *params }; + let accounts = account_compression::accounts::InitializeBatchedStateMerkleTreeAndQueue { + authority: context.get_payer().pubkey(), + merkle_tree: merkle_tree_pubkey, + queue: output_queue_pubkey, + registered_program_pda: None, + }; + + let instruction = 
Instruction { + program_id: ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction.data(), + }; + let signature = context + .create_and_send_transaction( + &[create_queue_account_ix, create_mt_account_ix, instruction], + &payer_pubkey, + &[&payer, &nullifier_queue_keypair, &merkle_tree_keypair], + ) + .await?; + Ok((total_rent, signature)) +} + +#[serial] +#[tokio::test] +async fn test_init_batch_address_merkle_trees() { + let mut program_test = ProgramTest::default(); + program_test.add_program("account_compression", ID, None); + program_test.add_program( + "spl_noop", + Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY), + None, + ); + program_test.set_compute_max_units(1_400_000u64); + let context = program_test.start_with_context().await; + let mut context = ProgramTestRpcConnection { context }; + + let params = InitAddressTreeAccountsInstructionData::test_default(); + let e2e_test_params = InitAddressTreeAccountsInstructionData::e2e_test_default(); + let default_params = InitAddressTreeAccountsInstructionData::default(); + let param_vec = vec![params, e2e_test_params, default_params]; + for params in param_vec.iter() { + println!("Init new mt with params {:?}", params); + let merkle_tree_keypair = Keypair::new(); + let merkle_tree_pubkey = merkle_tree_keypair.pubkey(); + + let owner = context.get_payer().pubkey(); + + let (mt_rent, _) = + perform_init_batch_address_merkle_tree(&mut context, params, &merkle_tree_keypair) + .await + .unwrap(); + let merkle_tree = + AccountZeroCopy::::new(&mut context, merkle_tree_pubkey) + .await; + let ref_mt_account = BatchedMerkleTreeAccount::get_address_tree_default( + owner, + None, + None, + params.rollover_threshold, + 0, + params.network_fee.unwrap_or_default(), + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + params.bloom_filter_capacity, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + mt_rent, + ); + + let mut 
tree_data = merkle_tree.account.data.clone(); + assert_address_mt_zero_copy_inited( + &mut tree_data.as_mut_slice(), + ref_mt_account, + params.bloom_filter_num_iters, + ); + } +} +pub async fn perform_init_batch_address_merkle_tree( + context: &mut ProgramTestRpcConnection, + params: &InitAddressTreeAccountsInstructionData, + merkle_tree_keypair: &Keypair, +) -> Result<(u64, Signature), RpcError> { + let payer = context.get_payer().insecure_clone(); + let payer_pubkey = context.get_payer().pubkey(); + let merkle_tree_pubkey = merkle_tree_keypair.pubkey(); + + let mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + let mt_rent = context + .get_minimum_balance_for_rent_exemption(mt_account_size) + .await + .unwrap(); + let create_mt_account_ix = create_account_instruction( + &payer_pubkey, + mt_account_size, + mt_rent, + &ID, + Some(&merkle_tree_keypair), + ); + + let instruction = + account_compression::instruction::IntializeBatchedAddressMerkleTree { params: *params }; + let accounts = account_compression::accounts::InitializeBatchAddressMerkleTree { + authority: context.get_payer().pubkey(), + merkle_tree: merkle_tree_pubkey, + registered_program_pda: None, + }; + + let instruction = Instruction { + program_id: ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction.data(), + }; + let res = context + .create_and_send_transaction( + &[create_mt_account_ix, instruction], + &payer_pubkey, + &[&payer, &merkle_tree_keypair], + ) + .await?; + Ok((mt_rent, res)) +} + +#[serial] +#[tokio::test] +async fn test_batch_address_merkle_trees() { + let mut program_test = ProgramTest::default(); + program_test.add_program("account_compression", ID, None); + program_test.add_program( + "spl_noop", + Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY), + 
None, + ); + program_test.set_compute_max_units(1_400_000u64); + let context = program_test.start_with_context().await; + let mut context = ProgramTestRpcConnection { context }; + let mut mock_indexer = mock_batched_forester::MockBatchedAddressForester::<26>::default(); + let payer = context.get_payer().insecure_clone(); + let mut params = InitAddressTreeAccountsInstructionData::test_default(); + // set rollover threshold to 0 to test rollover. + params.rollover_threshold = Some(0); + params.network_fee = Some(1); + let merkle_tree_keypair = Keypair::new(); + let address_merkle_tree_pubkey = merkle_tree_keypair.pubkey(); + + perform_init_batch_address_merkle_tree(&mut context, ¶ms, &merkle_tree_keypair) + .await + .unwrap(); + + let state_merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + { + let params = InitStateTreeAccountsInstructionData::test_default(); + perform_init_batch_state_merkle_tree_and_queue( + &mut context, + ¶ms, + &state_merkle_tree_keypair, + &nullifier_queue_keypair, + ) + .await + .unwrap(); + } + + // Insert a pair of addresses. + let address1 = 10001_u32.to_biguint().unwrap(); + let address2 = 10000_u32.to_biguint().unwrap(); + let addresses: Vec<[u8; 32]> = vec![ + bigint_to_be_bytes_array(&address1).unwrap(), + bigint_to_be_bytes_array(&address2).unwrap(), + ]; + // 1. Functional: inserts two addresses to the queue + insert_addresses( + &mut context, + address_merkle_tree_pubkey, + address_merkle_tree_pubkey, + addresses.clone(), + ) + .await + .unwrap(); + mock_indexer.queue_leaves.push(addresses[0]); + mock_indexer.queue_leaves.push(addresses[1]); + // TODO: assert complete queue state + + // 2. Failing: reinsert the same addresses + { + let result = insert_addresses( + &mut context, + address_merkle_tree_pubkey, + address_merkle_tree_pubkey, + addresses.clone(), + ) + .await; + assert_rpc_error(result, 0, light_bloom_filter::BloomFilterError::Full.into()).unwrap(); + } + // 3. 
Failing: invalid account + { + let result = insert_addresses( + &mut context, + nullifier_queue_keypair.pubkey(), + state_merkle_tree_keypair.pubkey(), + addresses.clone(), + ) + .await; + assert_rpc_error(result, 0, ErrorCode::AccountDiscriminatorMismatch.into()).unwrap(); + + let result = insert_addresses( + &mut context, + state_merkle_tree_keypair.pubkey(), + state_merkle_tree_keypair.pubkey(), + addresses.clone(), + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidTreeType.into(), + ) + .unwrap(); + } + // fill address queue batch + { + for i in (1..params.input_queue_batch_size).step_by(2) { + let address_1 = (i as u32).to_biguint().unwrap(); + let address_1 = bigint_to_be_bytes_array(&address_1).unwrap(); + let address_2 = ((i + 1) as u32).to_biguint().unwrap(); + let address_2 = bigint_to_be_bytes_array(&address_2).unwrap(); + mock_indexer.queue_leaves.push(address_1); + mock_indexer.queue_leaves.push(address_2); + insert_addresses( + &mut context, + address_merkle_tree_pubkey, + address_merkle_tree_pubkey, + vec![address_1, address_2], + ) + .await + .unwrap(); + } + } + spawn_prover( + true, + ProverConfig { + run_mode: None, + circuits: vec![ProofType::BatchAddressAppendTest], + }, + ) + .await; + // 4. Functional: update batch address tree + { + update_batch_address_tree( + &mut context, + &mut mock_indexer, + address_merkle_tree_pubkey, + &payer, + None, + UpdateBatchAddressTreeTestMode::Functional, + ) + .await + .unwrap(); + } + // 5. Failing: invalid proof + // 6. Failing: invalid new root + // 7. Failing: invalid root index + // 8. 
Failing: update twice with the same instruction (proof and public inputs) + for (mode, ix_index) in vec![ + UpdateBatchAddressTreeTestMode::InvalidProof, + UpdateBatchAddressTreeTestMode::InvalidNewRoot, + UpdateBatchAddressTreeTestMode::InvalidRootIndex, + UpdateBatchAddressTreeTestMode::UpdateTwice, + ] + .iter() + .zip(vec![0, 0, 0, 1]) + { + let mut mock_indexer = mock_indexer.clone(); + let result = update_batch_address_tree( + &mut context, + &mut mock_indexer, + address_merkle_tree_pubkey, + &payer, + None, + *mode, + ) + .await; + assert_rpc_error( + result, + ix_index, + VerifierError::ProofVerificationFailed.into(), + ) + .unwrap(); + } + // 9. Failing: invalid tree account (state tree account) + { + let mut mock_indexer = mock_indexer.clone(); + println!("invalid tree account"); + let result = update_batch_address_tree( + &mut context, + &mut mock_indexer, + address_merkle_tree_pubkey, + &payer, + Some(state_merkle_tree_keypair.pubkey()), + UpdateBatchAddressTreeTestMode::Functional, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidTreeType.into(), + ) + .unwrap(); + } + // 10. Failing: invalid tree account (invalid discriminator) + { + let mut mock_indexer = mock_indexer.clone(); + let result = update_batch_address_tree( + &mut context, + &mut mock_indexer, + address_merkle_tree_pubkey, + &payer, + Some(nullifier_queue_keypair.pubkey()), + UpdateBatchAddressTreeTestMode::Functional, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidDiscriminator.into(), + ) + .unwrap(); + } + let mint = Keypair::new(); + // 11. 
Failing: invalid tree account (invalid program owner) + { + let payer_pubkey = context.get_payer().pubkey(); + let rent = context + .get_minimum_balance_for_rent_exemption(Mint::LEN) + .await + .unwrap(); + + let (instructions, _) = + create_initialize_mint_instructions(&payer_pubkey, &payer_pubkey, rent, 2, &mint); + + context + .create_and_send_transaction(&instructions[..2], &payer_pubkey, &[&payer, &mint]) + .await + .unwrap(); + + let mut mock_indexer = mock_indexer.clone(); + let result = update_batch_address_tree( + &mut context, + &mut mock_indexer, + address_merkle_tree_pubkey, + &payer, + Some(mint.pubkey()), + UpdateBatchAddressTreeTestMode::Functional, + ) + .await; + assert_rpc_error(result, 0, ErrorCode::AccountOwnedByWrongProgram.into()).unwrap(); + } + // 12. functional: rollover + let (_, new_address_merkle_tree) = { + rollover_batch_address_merkle_tree( + &mut context, + address_merkle_tree_pubkey, + &payer, + RolloverBatchAddressTreeTestMode::Functional, + ) + .await + .unwrap() + }; + let invalid_authority = Keypair::new(); + airdrop_lamports(&mut context, &invalid_authority.pubkey(), 100_000_000_000) + .await + .unwrap(); + // 13. Failing: already rolled over + { + let result = rollover_batch_address_merkle_tree( + &mut context, + address_merkle_tree_pubkey, + &payer, + RolloverBatchAddressTreeTestMode::Functional, + ) + .await; + assert_rpc_error( + result, + 1, + AccountCompressionErrorCode::MerkleTreeAlreadyRolledOver.into(), + ) + .unwrap(); + } + // 14. Failing: invalid authority + { + let result = rollover_batch_address_merkle_tree( + &mut context, + new_address_merkle_tree, + &invalid_authority, + RolloverBatchAddressTreeTestMode::Functional, + ) + .await; + assert_rpc_error( + result, + 1, + AccountCompressionErrorCode::InvalidAuthority.into(), + ) + .unwrap(); + } + // 15. 
Failing: account too small + { + let result = rollover_batch_address_merkle_tree( + &mut context, + new_address_merkle_tree, + &payer, + RolloverBatchAddressTreeTestMode::InvalidNewAccountSizeSmall, + ) + .await; + assert_rpc_error( + result, + 1, + AccountCompressionErrorCode::InvalidAccountSize.into(), + ) + .unwrap(); + } + // 15. Failing: Account too large + { + let result = rollover_batch_address_merkle_tree( + &mut context, + new_address_merkle_tree, + &payer, + RolloverBatchAddressTreeTestMode::InvalidNewAccountSizeLarge, + ) + .await; + assert_rpc_error( + result, + 1, + AccountCompressionErrorCode::InvalidAccountSize.into(), + ) + .unwrap(); + } + // 16. invalid network fee + { + let mut params = InitAddressTreeAccountsInstructionData::test_default(); + // set rollover threshold to 0 to test rollover. + params.rollover_threshold = Some(0); + params.network_fee = None; + params.forester = Some(Pubkey::new_unique()); + let merkle_tree_keypair = Keypair::new(); + let address_merkle_tree_pubkey = merkle_tree_keypair.pubkey(); + + perform_init_batch_address_merkle_tree(&mut context, ¶ms, &merkle_tree_keypair) + .await + .unwrap(); + let result = rollover_batch_address_merkle_tree( + &mut context, + address_merkle_tree_pubkey, + &payer, + RolloverBatchAddressTreeTestMode::Functional, + ) + .await; + assert_rpc_error( + result, + 1, + AccountCompressionErrorCode::InvalidNetworkFee.into(), + ) + .unwrap(); + } +} +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RolloverBatchAddressTreeTestMode { + Functional, + InvalidNewAccountSizeSmall, + InvalidNewAccountSizeLarge, +} + +pub async fn rollover_batch_address_merkle_tree( + context: &mut ProgramTestRpcConnection, + address_merkle_tree_pubkey: Pubkey, + payer: &Keypair, + mode: RolloverBatchAddressTreeTestMode, +) -> Result<(Signature, Pubkey), RpcError> { + let new_address_merkle_tree_keypair = Keypair::new(); + let payer_pubkey = payer.pubkey(); + let params = 
InitAddressTreeAccountsInstructionData::test_default(); + let mut mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + if mode == RolloverBatchAddressTreeTestMode::InvalidNewAccountSizeSmall { + mt_account_size -= 1; + } else if mode == RolloverBatchAddressTreeTestMode::InvalidNewAccountSizeLarge { + mt_account_size += 1; + } + let mt_rent = context + .get_minimum_balance_for_rent_exemption(mt_account_size) + .await + .unwrap(); + let create_mt_account_ix = create_account_instruction( + &payer_pubkey, + mt_account_size, + mt_rent, + &ID, + Some(&new_address_merkle_tree_keypair), + ); + let instruction_data = account_compression::instruction::RolloverBatchAddressMerkleTree { + network_fee: params.network_fee, + }; + let accounts = account_compression::accounts::RolloverBatchAddressMerkleTree { + authority: payer_pubkey, + old_address_merkle_tree: address_merkle_tree_pubkey, + new_address_merkle_tree: new_address_merkle_tree_keypair.pubkey(), + registered_program_pda: None, + fee_payer: payer_pubkey, + }; + let instruction = Instruction { + program_id: ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + }; + + Ok(( + context + .create_and_send_transaction( + &[create_mt_account_ix, instruction], + &payer_pubkey, + &[&payer, &new_address_merkle_tree_keypair], + ) + .await?, + new_address_merkle_tree_keypair.pubkey(), + )) +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum UpdateBatchAddressTreeTestMode { + Functional, + InvalidProof, + InvalidNewRoot, + InvalidRootIndex, + UpdateTwice, +} + +/// 1. Insert addresses into the address queue +/// 2. invalid proof +/// 3. invalid new_root +/// 4. invalid root index +/// 5. update twice with the same instruction (proof and public inputs) +/// 6. 
invalid tree account +pub async fn update_batch_address_tree( + context: &mut ProgramTestRpcConnection, + mock_indexer: &mut MockBatchedAddressForester<26>, + address_merkle_tree_pubkey: Pubkey, + payer: &Keypair, + invalid_tree: Option, + mode: UpdateBatchAddressTreeTestMode, +) -> Result { + let mut merkle_tree_account_data = context + .get_account(address_merkle_tree_pubkey) + .await + .unwrap() + .unwrap() + .data; + + let zero_copy_account = ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + &mut merkle_tree_account_data, + ) + .unwrap(); + let start_index = zero_copy_account.get_account().next_index; + + let mut old_root_index = zero_copy_account.root_history.last_index(); + let current_root = zero_copy_account + .root_history + .get(old_root_index as usize) + .unwrap(); + let next_full_batch = zero_copy_account.get_account().queue.next_full_batch_index; + + let batch = zero_copy_account + .batches + .get(next_full_batch as usize) + .unwrap(); + let batch_start_index = batch.start_index; + let leaves_hashchain = zero_copy_account + .hashchain_store + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let (mut proof, mut new_root) = mock_indexer + .get_batched_address_proof( + zero_copy_account.get_account().queue.batch_size as u32, + zero_copy_account.get_account().queue.zkp_batch_size as u32, + *leaves_hashchain, + start_index as usize, + batch_start_index as usize, + *current_root, + ) + .await + .unwrap(); + if mode == UpdateBatchAddressTreeTestMode::InvalidRootIndex { + old_root_index -= 1; + } + if mode == UpdateBatchAddressTreeTestMode::InvalidNewRoot { + new_root[0] = new_root[0].wrapping_add(1); + } + if mode == UpdateBatchAddressTreeTestMode::InvalidProof { + proof.a = proof.c; + } + let instruction_data = InstructionDataBatchNullifyInputs { + public_inputs: BatchProofInputsIx { + new_root, + old_root_index: old_root_index as u16, + }, + compressed_proof: CompressedProof { + a: 
proof.a, + b: proof.b, + c: proof.c, + }, + }; + let instruction_data = account_compression::instruction::BatchUpdateAddressTree { + data: instruction_data.try_to_vec().unwrap(), + }; + + let merkle_tree = if let Some(invalid_tree) = invalid_tree { + invalid_tree + } else { + address_merkle_tree_pubkey + }; + + let accounts = account_compression::accounts::BatchUpdateAddressTree { + authority: context.get_payer().pubkey(), + registered_program_pda: None, + log_wrapper: NOOP_PROGRAM_ID, + merkle_tree, + }; + let instructions = if mode == UpdateBatchAddressTreeTestMode::UpdateTwice { + vec![ + Instruction { + program_id: ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + }, + Instruction { + program_id: ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + }, + ] + } else { + vec![Instruction { + program_id: ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + }] + }; + context + .create_and_send_transaction(&instructions, &payer.pubkey(), &[&payer]) + .await +} diff --git a/test-programs/compressed-token-test/tests/test.rs b/test-programs/compressed-token-test/tests/test.rs index 13113b18f5..0f5b652967 100644 --- a/test-programs/compressed-token-test/tests/test.rs +++ b/test-programs/compressed-token-test/tests/test.rs @@ -1,5 +1,6 @@ #![cfg(feature = "test-sbf")] +use account_compression::errors::AccountCompressionErrorCode; use anchor_lang::{ system_program, AnchorDeserialize, AnchorSerialize, InstructionData, ToAccountMetas, }; @@ -2600,7 +2601,7 @@ async fn failing_tests_burn() { assert_rpc_error( res, 0, - anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch.into(), + AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch.into(), ) .unwrap(); } @@ -4084,7 +4085,7 @@ async fn test_invalid_inputs() { &mut rpc, change_out_compressed_account_0, transfer_recipient_out_compressed_account_0, - &nullifier_queue_pubkey, + 
&merkle_tree_pubkey, &nullifier_queue_pubkey, &payer, &Some(proof_rpc_result.proof.clone()), @@ -4093,11 +4094,8 @@ async fn test_invalid_inputs() { true, ) .await; - assert_custom_error_or_program_error( - res, - anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch.into(), - ) - .unwrap(); + assert_custom_error_or_program_error(res, VerifierError::ProofVerificationFailed.into()) + .unwrap(); } // Test 12: invalid Merkle tree pubkey { @@ -4117,7 +4115,7 @@ async fn test_invalid_inputs() { assert_custom_error_or_program_error( res, - anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch.into(), + AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch.into(), ) .unwrap(); } diff --git a/test-programs/create-address-test-program/Cargo.toml b/test-programs/create-address-test-program/Cargo.toml new file mode 100644 index 0000000000..1035450431 --- /dev/null +++ b/test-programs/create-address-test-program/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "create-address-test-program" +version = "1.0.0" +description = "Test program using generalized account compression" +repository = "https://github.com/Lightprotocol/light-protocol" +license = "Apache-2.0" +edition = "2021" + +[lib] +crate-type = ["cdylib", "lib"] +name = "create_address_test_program" + +[features] +no-entrypoint = [] +no-idl = [] +no-log-ix-name = [] +cpi = ["no-entrypoint"] +test-sbf = [] +custom-heap = [] +default = ["custom-heap"] + +[dependencies] +anchor-lang = { workspace = true } +light-system-program = { workspace = true } +account-compression = { workspace = true } +light-hasher = { path = "../../merkle-tree/hasher", version = "1.1.0" } +light-utils = { path = "../../utils", version = "1.1.0" } diff --git a/test-programs/create-address-test-program/Xargo.toml b/test-programs/create-address-test-program/Xargo.toml new file mode 100644 index 0000000000..475fb71ed1 --- /dev/null +++ b/test-programs/create-address-test-program/Xargo.toml @@ -0,0 +1,2 @@ 
+[target.bpfel-unknown-unknown.dependencies.std] +features = [] diff --git a/test-programs/create-address-test-program/src/create_pda.rs b/test-programs/create-address-test-program/src/create_pda.rs new file mode 100644 index 0000000000..12a194ddf4 --- /dev/null +++ b/test-programs/create-address-test-program/src/create_pda.rs @@ -0,0 +1,160 @@ +use account_compression::{program::AccountCompression, utils::constants::CPI_AUTHORITY_PDA_SEED}; +use anchor_lang::prelude::*; +use light_hasher::{errors::HasherError, DataHasher, Poseidon}; +use light_system_program::{ + invoke::processor::CompressedProof, + program::LightSystemProgram, + sdk::{ + address::derive_address, + compressed_account::{CompressedAccount, CompressedAccountData}, + CompressedCpiContext, + }, + InstructionDataInvokeCpi, NewAddressParamsPacked, OutputCompressedAccountWithPackedContext, +}; + +pub fn process_create_pda<'info>( + ctx: Context<'_, '_, '_, 'info, CreateCompressedPda<'info>>, + data: [u8; 31], + proof: Option, + new_address_params: NewAddressParamsPacked, + bump: u8, +) -> Result<()> { + let compressed_pda = create_compressed_pda_data(data, &ctx, &new_address_params)?; + cpi_compressed_pda_transfer_as_program( + &ctx, + proof, + new_address_params, + compressed_pda, + None, + bump, + ) +} + +fn cpi_compressed_pda_transfer_as_program<'info>( + ctx: &Context<'_, '_, '_, 'info, CreateCompressedPda<'info>>, + proof: Option, + new_address_params: NewAddressParamsPacked, + compressed_pda: OutputCompressedAccountWithPackedContext, + cpi_context: Option, + bump: u8, +) -> Result<()> { + let invoking_program = ctx.accounts.self_program.to_account_info(); + + let inputs_struct = InstructionDataInvokeCpi { + relay_fee: None, + input_compressed_accounts_with_merkle_context: Vec::new(), + output_compressed_accounts: vec![compressed_pda], + proof, + new_address_params: vec![new_address_params], + compress_or_decompress_lamports: None, + is_compress: false, + cpi_context, + }; + // defining seeds again 
so that the cpi doesn't fail we want to test the check in the compressed pda program + let seeds: [&[u8]; 2] = [CPI_AUTHORITY_PDA_SEED, &[bump]]; + let mut inputs = Vec::new(); + InstructionDataInvokeCpi::serialize(&inputs_struct, &mut inputs).unwrap(); + + let cpi_accounts = light_system_program::cpi::accounts::InvokeCpiInstruction { + fee_payer: ctx.accounts.signer.to_account_info(), + authority: ctx.accounts.cpi_signer.to_account_info(), + registered_program_pda: ctx.accounts.registered_program_pda.to_account_info(), + noop_program: ctx.accounts.noop_program.to_account_info(), + account_compression_authority: ctx.accounts.account_compression_authority.to_account_info(), + account_compression_program: ctx.accounts.account_compression_program.to_account_info(), + invoking_program, + sol_pool_pda: None, + decompression_recipient: None, + system_program: ctx.accounts.system_program.to_account_info(), + cpi_context_account: None, + }; + + let signer_seeds: [&[&[u8]]; 1] = [&seeds[..]]; + + let mut cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.light_system_program.to_account_info(), + cpi_accounts, + &signer_seeds, + ); + + cpi_ctx.remaining_accounts = ctx.remaining_accounts.to_vec(); + + light_system_program::cpi::invoke_cpi(cpi_ctx, inputs)?; + Ok(()) +} + +fn create_compressed_pda_data( + data: [u8; 31], + ctx: &Context<'_, '_, '_, '_, CreateCompressedPda<'_>>, + new_address_params: &NewAddressParamsPacked, +) -> Result { + let timelock_compressed_pda = RegisteredUser { + user_pubkey: *ctx.accounts.signer.key, + data, + }; + let compressed_account_data = CompressedAccountData { + discriminator: 1u64.to_le_bytes(), + data: timelock_compressed_pda.try_to_vec().unwrap(), + data_hash: timelock_compressed_pda + .hash::() + .map_err(ProgramError::from)?, + }; + let mut discriminator_bytes = [0u8; 8]; + + discriminator_bytes.copy_from_slice( + &ctx.remaining_accounts[new_address_params.address_merkle_tree_account_index as usize] + .try_borrow_data()?[0..8], + ); + 
let address = derive_address( + &new_address_params.seed, + &ctx.remaining_accounts[new_address_params.address_merkle_tree_account_index as usize] + .key() + .to_bytes(), + &crate::ID.to_bytes(), + ); + + Ok(OutputCompressedAccountWithPackedContext { + compressed_account: CompressedAccount { + owner: crate::ID, // should be crate::ID, test can provide an invalid owner + lamports: 0, + address: Some(address), + data: Some(compressed_account_data), + }, + merkle_tree_index: 0, + }) +} + +#[derive(AnchorDeserialize, AnchorSerialize, Debug, Clone)] +pub struct RegisteredUser { + pub user_pubkey: Pubkey, + pub data: [u8; 31], +} + +impl light_hasher::DataHasher for RegisteredUser { + fn hash(&self) -> std::result::Result<[u8; 32], HasherError> { + let truncated_user_pubkey = + light_utils::hash_to_bn254_field_size_be(&self.user_pubkey.to_bytes()) + .unwrap() + .0; + + H::hashv(&[truncated_user_pubkey.as_slice(), self.data.as_slice()]) + } +} + +#[derive(Accounts)] +pub struct CreateCompressedPda<'info> { + #[account(mut)] + pub signer: Signer<'info>, + pub light_system_program: Program<'info, LightSystemProgram>, + pub account_compression_program: Program<'info, AccountCompression>, + /// CHECK: + pub account_compression_authority: AccountInfo<'info>, + /// CHECK: + pub registered_program_pda: AccountInfo<'info>, + /// CHECK: + pub noop_program: AccountInfo<'info>, + pub self_program: Program<'info, crate::program::SystemCpiTest>, + /// CHECK: + pub cpi_signer: AccountInfo<'info>, + pub system_program: Program<'info, System>, +} diff --git a/test-programs/create-address-test-program/src/lib.rs b/test-programs/create-address-test-program/src/lib.rs new file mode 100644 index 0000000000..426e2dd856 --- /dev/null +++ b/test-programs/create-address-test-program/src/lib.rs @@ -0,0 +1,27 @@ +#![allow(clippy::too_many_arguments)] + +use anchor_lang::prelude::*; +use anchor_lang::solana_program::pubkey::Pubkey; +use light_system_program::invoke::processor::CompressedProof; +pub 
mod create_pda; +pub use create_pda::*; +use light_system_program::NewAddressParamsPacked; + +declare_id!("FNt7byTHev1k5x2cXZLBr8TdWiC3zoP5vcnZR4P682Uy"); + +#[program] + +pub mod system_cpi_test { + + use super::*; + + pub fn create_compressed_pda<'info>( + ctx: Context<'_, '_, '_, 'info, CreateCompressedPda<'info>>, + data: [u8; 31], + proof: Option, + new_address_parameters: NewAddressParamsPacked, + bump: u8, + ) -> Result<()> { + process_create_pda(ctx, data, proof, new_address_parameters, bump) + } +} diff --git a/test-programs/e2e-test/tests/test.rs b/test-programs/e2e-test/tests/test.rs index a581fac681..be2191984f 100644 --- a/test-programs/e2e-test/tests/test.rs +++ b/test-programs/e2e-test/tests/test.rs @@ -1,6 +1,8 @@ #![cfg(feature = "test-sbf")] -use account_compression::InitStateTreeAccountsInstructionData; +use account_compression::{ + InitAddressTreeAccountsInstructionData, InitStateTreeAccountsInstructionData, +}; use light_prover_client::gnark::helpers::{ProofType, ProverConfig}; use light_registry::protocol_config::state::ProtocolConfig; use light_test_utils::e2e_test_env::{E2ETestEnv, GeneralActionConfig, KeypairActionConfig}; @@ -22,6 +24,7 @@ async fn test_10_all() { ..ProtocolConfig::default() }; let params = InitStateTreeAccountsInstructionData::e2e_test_default(); + let address_params = InitAddressTreeAccountsInstructionData::e2e_test_default(); let (rpc, env_accounts) = setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params( @@ -29,6 +32,7 @@ async fn test_10_all() { protocol_config, true, params, + address_params, ) .await; diff --git a/test-programs/registry-test/tests/tests.rs b/test-programs/registry-test/tests/tests.rs index 483db17a3b..6a9cc4ded5 100644 --- a/test-programs/registry-test/tests/tests.rs +++ b/test-programs/registry-test/tests/tests.rs @@ -1,11 +1,14 @@ // #![cfg(feature = "test-sbf")] -use account_compression::batched_merkle_tree::ZeroCopyBatchedMerkleTreeAccount; +use 
account_compression::batched_merkle_tree::{ + BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount, +}; use account_compression::{ - AddressMerkleTreeConfig, AddressQueueConfig, InitStateTreeAccountsInstructionData, + assert_address_mt_zero_copy_inited, AddressMerkleTreeConfig, AddressQueueConfig, + InitAddressTreeAccountsInstructionData, InitStateTreeAccountsInstructionData, NullifierQueueConfig, StateMerkleTreeConfig, }; -use anchor_lang::{InstructionData, ToAccountMetas}; +use anchor_lang::{AnchorSerialize, InstructionData, ToAccountMetas}; use forester_utils::airdrop_lamports; use forester_utils::forester_epoch::get_epoch_phases; use light_program_test::test_env::{ @@ -15,9 +18,11 @@ use light_program_test::test_env::{ setup_test_programs_with_accounts, setup_test_programs_with_accounts_with_protocol_config, EnvAccountKeypairs, GROUP_PDA_SEED_TEST_KEYPAIR, OLD_REGISTRY_ID_TEST_KEYPAIR, }; +use light_prover_client::gnark::helpers::{spawn_prover, ProofType, ProverConfig}; use light_program_test::test_rpc::ProgramTestRpcConnection; use light_registry::account_compression_cpi::sdk::{ - create_batch_append_instruction, create_batch_nullify_instruction, create_nullify_instruction, + create_batch_append_instruction, create_batch_nullify_instruction, + create_batch_update_address_tree_instruction, create_nullify_instruction, create_update_address_merkle_tree_instruction, CreateNullifyInstructionInputs, UpdateAddressMerkleTreeInstructionInputs, }; @@ -36,13 +41,15 @@ use light_test_utils::assert_epoch::{ assert_epoch_pda, assert_finalized_epoch_registration, assert_registered_forester_pda, assert_report_work, fetch_epoch_and_forester_pdas, }; +use light_test_utils::create_address_test_program_sdk::perform_create_pda_with_event_rnd; use light_test_utils::e2e_test_env::{init_program_test_env, init_program_test_env_forester}; use light_test_utils::indexer::TestIndexer; use light_test_utils::rpc::ProgramTestRpcConnection; use light_test_utils::test_batch_forester::{ 
assert_perform_state_mt_roll_over, create_append_batch_ix_data, + create_batch_address_merkle_tree, create_batch_update_address_tree_instruction_data_with_proof, create_batched_state_merkle_tree, perform_batch_append, perform_batch_nullify, - perform_rollover_batch_state_merkle_tree, + perform_rollover_batch_address_merkle_tree, perform_rollover_batch_state_merkle_tree, }; use light_test_utils::test_env::{ create_address_merkle_tree_and_queue_account, create_state_merkle_tree_and_queue_account, @@ -58,9 +65,11 @@ use light_test_utils::{ assert_rpc_error, create_address_merkle_tree_and_queue_account_with_assert, create_rollover_address_merkle_tree_instructions, create_rollover_state_merkle_tree_instructions, register_test_forester, update_test_forester, - Epoch, RpcConnection, SolanaRpcConnection, SolanaRpcUrl, TreeAccounts, TreeType, + Epoch, RpcConnection, RpcError, SolanaRpcConnection, SolanaRpcUrl, TreeAccounts, TreeType, + CREATE_ADDRESS_TEST_PROGRAM_ID, }; use serial_test::serial; +use solana_sdk::signature::Signature; use solana_sdk::{ instruction::Instruction, native_token::LAMPORTS_PER_SOL, @@ -529,7 +538,7 @@ async fn test_custom_forester() { let cpi_context_keypair = Keypair::new(); // create work 1 item in address and nullifier queue each let (mut state_merkle_tree_bundle, _, mut rpc) = { - let mut e2e_env = init_program_test_env(rpc, &env).await; + let mut e2e_env = init_program_test_env(rpc, &env, false).await; e2e_env.indexer.state_merkle_trees.clear(); // add state merkle tree to the indexer e2e_env @@ -604,6 +613,7 @@ async fn test_custom_forester_batched() { ProtocolConfig::default(), true, tree_params, + InitAddressTreeAccountsInstructionData::test_default(), ) .await; @@ -623,7 +633,7 @@ async fn test_custom_forester_batched() { e2e_env.keypair_action_config.fee_assert = false; e2e_env } else { - init_program_test_env(rpc, &env).await + init_program_test_env(rpc, &env, false).await }; e2e_env.indexer.state_merkle_trees.clear(); // add state 
merkle tree to the indexer @@ -650,9 +660,10 @@ async fn test_custom_forester_batched() { .await .unwrap() .unwrap(); - let merkle_tree = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut merkle_tree_account.data) - .unwrap(); + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut merkle_tree_account.data, + ) + .unwrap(); // fill two output and one input batch for i in 0..merkle_tree.get_account().queue.batch_size { println!("\ntx {}", i); @@ -890,7 +901,7 @@ async fn test_register_and_update_forester_pda() { // create work 1 item in address and nullifier queue each let (mut state_merkle_tree_bundle, mut address_merkle_tree, mut rpc) = { - let mut e2e_env = init_program_test_env(rpc, &env).await; + let mut e2e_env = init_program_test_env(rpc, &env, false).await; // remove batched Merkle tree, fee assert makes this test flaky otherwise e2e_env.indexer.state_merkle_trees.remove(1); e2e_env.create_address(None, None).await; @@ -1342,6 +1353,7 @@ async fn test_rollover_batch_state_tree() { ProtocolConfig::default(), true, params, + InitAddressTreeAccountsInstructionData::test_default(), ) .await; let payer = rpc.get_payer().insecure_clone(); @@ -1439,6 +1451,7 @@ async fn test_rollover_batch_state_tree() { ProtocolConfig::default(), true, params, + InitAddressTreeAccountsInstructionData::test_default(), ) .await; airdrop_lamports(&mut rpc, &custom_forester.pubkey(), 10_000_000_000) @@ -1522,3 +1535,272 @@ async fn test_rollover_batch_state_tree() { } } } +#[serial] +#[tokio::test] +async fn test_batch_address_tree() { + let tree_params = InitAddressTreeAccountsInstructionData::test_default(); + + let (mut rpc, env) = + setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params( + Some(vec![( + String::from("create_address_test_program"), + CREATE_ADDRESS_TEST_PROGRAM_ID, + )]), + ProtocolConfig::default(), + true, + InitStateTreeAccountsInstructionData::test_default(), + tree_params, + ) + .await; + 
spawn_prover( + true, + ProverConfig { + run_mode: None, + circuits: vec![ProofType::NonInclusion, ProofType::BatchAddressAppendTest], + }, + ) + .await; + let payer = rpc.get_payer().insecure_clone(); + let mut test_indexer = + TestIndexer::::init_from_env(&payer, &env, None).await; + { + let new_merkle_tree = Keypair::new(); + let mut test_tree_params = InitAddressTreeAccountsInstructionData::default(); + test_tree_params.network_fee = Some(1); + let result = + create_batch_address_merkle_tree(&mut rpc, &payer, &new_merkle_tree, test_tree_params) + .await; + assert_rpc_error(result, 1, RegistryError::InvalidNetworkFee.into()).unwrap(); + } + + for i in 0..tree_params.input_queue_batch_size * 2 { + println!("tx {}", i); + perform_create_pda_with_event_rnd(&mut test_indexer, &mut rpc, &env, &payer) + .await + .unwrap(); + } + println!("pre perform_batch_address_merkle_tree_update"); + for _ in 0..5 { + perform_batch_address_merkle_tree_update( + &mut rpc, + &mut test_indexer, + &env.forester, + &env.forester.pubkey(), + &env.batch_address_merkle_tree, + 0, + ) + .await + .unwrap(); + } + let mut account = rpc + .get_account(env.batch_address_merkle_tree) + .await + .unwrap() + .unwrap(); + let zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut(account.data.as_mut_slice()) + .unwrap(); + test_indexer.finalize_batched_address_tree_update( + env.batch_address_merkle_tree, + zero_copy_account.get_account().queue.batch_size as usize, + *zero_copy_account.root_history.last().unwrap(), + ); + + // Non eligible forester. 
+ { + let unregistered_forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&unregistered_forester_keypair.pubkey(), 1_000_000_000) + .await + .unwrap(); + let result = perform_batch_address_merkle_tree_update( + &mut rpc, + &mut test_indexer, + &unregistered_forester_keypair, + &env.forester.pubkey(), + &env.batch_address_merkle_tree, + 0, + ) + .await; + assert_rpc_error(result, 0, RegistryError::InvalidForester.into()).unwrap(); + } + + for _ in 0..tree_params.input_queue_batch_size { + perform_create_pda_with_event_rnd(&mut test_indexer, &mut rpc, &env, &payer) + .await + .unwrap(); + } + for _ in 0..5 { + perform_batch_address_merkle_tree_update( + &mut rpc, + &mut test_indexer, + &env.forester, + &env.forester.pubkey(), + &env.batch_address_merkle_tree, + 0, + ) + .await + .unwrap(); + } + let mut account = rpc + .get_account(env.batch_address_merkle_tree) + .await + .unwrap() + .unwrap(); + let zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut(account.data.as_mut_slice()) + .unwrap(); + test_indexer.finalize_batched_address_tree_update( + env.batch_address_merkle_tree, + zero_copy_account.get_account().queue.batch_size as usize, + *zero_copy_account.root_history.last().unwrap(), + ); +} + +pub async fn perform_batch_address_merkle_tree_update( + rpc: &mut R, + test_indexer: &mut TestIndexer, + forester: &Keypair, + derivation_pubkey: &Pubkey, + merkle_tree_pubkey: &Pubkey, + epoch: u64, +) -> Result { + let instruction_data = create_batch_update_address_tree_instruction_data_with_proof( + rpc, + test_indexer, + *merkle_tree_pubkey, + ) + .await + .unwrap(); + + let instruction = create_batch_update_address_tree_instruction( + forester.pubkey(), + *derivation_pubkey, + *merkle_tree_pubkey, + epoch, + instruction_data.try_to_vec().unwrap(), + ); + rpc.create_and_send_transaction(&[instruction], &forester.pubkey(), &[forester]) + .await +} + +#[serial] +#[tokio::test] +async fn test_rollover_batch_address_tree() { + let 
mut tree_params = InitAddressTreeAccountsInstructionData::test_default(); + tree_params.rollover_threshold = Some(0); + + let (mut rpc, env) = + setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params( + Some(vec![( + String::from("create_address_test_program"), + CREATE_ADDRESS_TEST_PROGRAM_ID, + )]), + ProtocolConfig::default(), + true, + InitStateTreeAccountsInstructionData::test_default(), + tree_params, + ) + .await; + spawn_prover( + false, + ProverConfig { + run_mode: None, + circuits: vec![ProofType::NonInclusion], + }, + ) + .await; + let payer = rpc.get_payer().insecure_clone(); + let mut test_indexer = + TestIndexer::::init_from_env(&payer, &env, None).await; + // Create one address to pay for rollover fees. + perform_create_pda_with_event_rnd(&mut test_indexer, &mut rpc, &env, &payer) + .await + .unwrap(); + let new_merkle_tree_keypair = Keypair::new(); + perform_rollover_batch_address_merkle_tree( + &mut rpc, + &env.forester, + env.forester.pubkey(), + env.batch_address_merkle_tree, + &new_merkle_tree_keypair, + 0, + ) + .await + .unwrap(); + let mut account = rpc + .get_account(new_merkle_tree_keypair.pubkey()) + .await + .unwrap() + .unwrap(); + let zero_copy_account = BatchedMerkleTreeAccount::get_address_tree_default( + env.group_pda, + tree_params.program_owner, + tree_params.forester, + Some(0), + tree_params.index, + tree_params.network_fee.unwrap_or_default(), + tree_params.input_queue_batch_size, + tree_params.input_queue_zkp_batch_size, + tree_params.bloom_filter_capacity, + tree_params.root_history_capacity, + tree_params.height, + 2, + account.lamports, + ); + assert_address_mt_zero_copy_inited(&mut account.data, zero_copy_account, 3); + // Create one address to pay for rollover fees. 
+ perform_create_pda_with_event_rnd(&mut test_indexer, &mut rpc, &env, &payer) + .await + .unwrap(); + // invalid forester + { + let unregistered_forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&unregistered_forester_keypair.pubkey(), 1_000_000_000) + .await + .unwrap(); + let new_merkle_tree_keypair2 = Keypair::new(); + + let result = perform_rollover_batch_address_merkle_tree( + &mut rpc, + &unregistered_forester_keypair, + env.forester.pubkey(), + new_merkle_tree_keypair.pubkey(), + &new_merkle_tree_keypair2, + 0, + ) + .await; + assert_rpc_error(result, 1, RegistryError::InvalidForester.into()).unwrap(); + + // Issue is forester is not registered for this epoch + // register_test_forester( + // &mut rpc, + // &env.governance_authority, + // &unregistered_forester_keypair.pubkey(), + // ForesterConfig::default(), + // ) + // .await + // .unwrap(); + // let result = perform_rollover_batch_address_merkle_tree( + // &mut rpc, + // &unregistered_forester_keypair, + // unregistered_forester_keypair.pubkey(), + // new_merkle_tree_keypair.pubkey(), + // &new_merkle_tree_keypair2, + // 0, + // ) + // .await; + // assert_rpc_error(result, 1, RegistryError::NotInActivePhase.into()).unwrap(); + } + + let new_merkle_tree_keypair2 = Keypair::new(); + perform_rollover_batch_address_merkle_tree( + &mut rpc, + &env.forester, + env.forester.pubkey(), + new_merkle_tree_keypair.pubkey(), + &new_merkle_tree_keypair2, + 0, + ) + .await + .unwrap(); +} diff --git a/test-programs/system-cpi-test/Cargo.toml b/test-programs/system-cpi-test/Cargo.toml index 9eacf02ef5..0e331a5be7 100644 --- a/test-programs/system-cpi-test/Cargo.toml +++ b/test-programs/system-cpi-test/Cargo.toml @@ -39,7 +39,9 @@ light-test-utils = { version = "1.2.0", path = "../../test-utils", features=["de reqwest = "0.11.26" tokio = { workspace = true } light-prover-client = { path = "../../circuit-lib/light-prover-client", version = "1.2.0" } +light-verifier = { path = "../../circuit-lib/verifier", version 
= "1.1.0" } num-bigint = "0.4.6" num-traits = "0.2.19" spl-token = { workspace = true } anchor-spl = { workspace = true } +serial_test = { workspace = true } \ No newline at end of file diff --git a/test-programs/system-cpi-test/src/create_pda.rs b/test-programs/system-cpi-test/src/create_pda.rs index d14bbdef28..f774564405 100644 --- a/test-programs/system-cpi-test/src/create_pda.rs +++ b/test-programs/system-cpi-test/src/create_pda.rs @@ -1,11 +1,20 @@ -use account_compression::{program::AccountCompression, utils::constants::CPI_AUTHORITY_PDA_SEED}; +use account_compression::{ + batched_merkle_tree::BatchedMerkleTreeAccount, program::AccountCompression, + utils::constants::CPI_AUTHORITY_PDA_SEED, AddressMerkleTreeAccount, +}; use anchor_lang::prelude::*; +use anchor_lang::Discriminator; use light_hasher::{errors::HasherError, DataHasher, Poseidon}; +use light_system_program::sdk::compressed_account::PackedCompressedAccountWithMerkleContext; +use light_system_program::sdk::compressed_account::PackedReadOnlyCompressedAccount; +use light_system_program::sdk::compressed_account::QueueIndex; +use light_system_program::InstructionDataInvokeCpiWithReadOnly; +use light_system_program::PackedReadOnlyAddress; use light_system_program::{ invoke::processor::CompressedProof, program::LightSystemProgram, sdk::{ - address::derive_address, + address::{derive_address, derive_address_legacy}, compressed_account::{CompressedAccount, CompressedAccountData}, CompressedCpiContext, }, @@ -14,12 +23,32 @@ use light_system_program::{ #[derive(AnchorSerialize, AnchorDeserialize, Debug, Clone, PartialEq)] pub enum CreatePdaMode { + Functional, + BatchFunctional, ProgramIsSigner, ProgramIsNotSigner, InvalidSignerSeeds, InvalidInvokingProgram, WriteToAccountNotOwned, NoData, + BatchAddressFunctional, + InvalidBatchTreeAccount, + OneReadOnlyAddress, + TwoReadOnlyAddresses, + InvalidReadOnlyAddress, + InvalidReadOnlyRootIndex, + InvalidReadOnlyMerkleTree, + ReadOnlyProofOfInsertedAddress, + 
UseReadOnlyAddressInAccount, + InvalidReadOnlyAccount, + InvalidReadOnlyAccountRootIndex, + InvalidReadOnlyAccountMerkleTree, + InvalidReadOnlyAccountOutputQueue, + InvalidProofReadOnlyAccount, + ReadOnlyProofOfInsertedAccount, + ProofIsNoneReadOnlyAccount, + AccountNotInValueVecMarkedProofByIndex, + InvalidLeafIndex, } pub fn process_create_pda<'info>( @@ -29,13 +58,21 @@ pub fn process_create_pda<'info>( new_address_params: NewAddressParamsPacked, owner_program: Pubkey, cpi_context: Option, - is_program_signer: CreatePdaMode, + mode: CreatePdaMode, bump: u8, + read_only_address: Option>, + read_only_accounts: Option>, + input_accounts: Option>, ) -> Result<()> { - let compressed_pda = - create_compressed_pda_data(data, &ctx, &new_address_params, &owner_program)?; + let compressed_pda = create_compressed_pda_data( + data, + &ctx, + &new_address_params, + &owner_program, + mode.clone(), + )?; - match is_program_signer { + match mode { CreatePdaMode::ProgramIsNotSigner => { cpi_compressed_pda_transfer_as_non_program( &ctx, @@ -46,7 +83,27 @@ pub fn process_create_pda<'info>( )?; } // functional test - CreatePdaMode::ProgramIsSigner => { + CreatePdaMode::ProgramIsSigner + | CreatePdaMode::BatchAddressFunctional + | CreatePdaMode::InvalidBatchTreeAccount + | CreatePdaMode::OneReadOnlyAddress + | CreatePdaMode::TwoReadOnlyAddresses + | CreatePdaMode::InvalidReadOnlyAddress + | CreatePdaMode::InvalidReadOnlyRootIndex + | CreatePdaMode::InvalidReadOnlyMerkleTree + | CreatePdaMode::UseReadOnlyAddressInAccount + | CreatePdaMode::ReadOnlyProofOfInsertedAddress + | CreatePdaMode::InvalidReadOnlyAccount + | CreatePdaMode::InvalidReadOnlyAccountRootIndex + | CreatePdaMode::InvalidReadOnlyAccountMerkleTree + | CreatePdaMode::ReadOnlyProofOfInsertedAccount + | CreatePdaMode::BatchFunctional + | CreatePdaMode::Functional + | CreatePdaMode::InvalidProofReadOnlyAccount + | CreatePdaMode::InvalidReadOnlyAccountOutputQueue + | CreatePdaMode::ProofIsNoneReadOnlyAccount + | 
CreatePdaMode::AccountNotInValueVecMarkedProofByIndex + | CreatePdaMode::InvalidLeafIndex => { cpi_compressed_pda_transfer_as_program( &ctx, proof, @@ -54,7 +111,10 @@ pub fn process_create_pda<'info>( compressed_pda, cpi_context, bump, - CreatePdaMode::ProgramIsSigner, + read_only_address, + read_only_accounts, + input_accounts, + mode, )?; } CreatePdaMode::InvalidSignerSeeds => { @@ -65,6 +125,9 @@ pub fn process_create_pda<'info>( compressed_pda, cpi_context, bump, + read_only_address, + None, + None, CreatePdaMode::InvalidSignerSeeds, )?; } @@ -76,6 +139,9 @@ pub fn process_create_pda<'info>( compressed_pda, cpi_context, bump, + read_only_address, + None, + None, CreatePdaMode::InvalidInvokingProgram, )?; } @@ -87,6 +153,9 @@ pub fn process_create_pda<'info>( compressed_pda, cpi_context, bump, + read_only_address, + None, + None, CreatePdaMode::WriteToAccountNotOwned, )?; } @@ -98,6 +167,9 @@ pub fn process_create_pda<'info>( compressed_pda, cpi_context, bump, + read_only_address, + None, + None, CreatePdaMode::NoData, )?; } @@ -159,6 +231,9 @@ fn cpi_compressed_pda_transfer_as_program<'info>( compressed_pda: OutputCompressedAccountWithPackedContext, cpi_context: Option, bump: u8, + mut read_only_address: Option>, + mut read_only_accounts: Option>, + input_accounts: Option>, mode: CreatePdaMode, ) -> Result<()> { let invoking_program = match mode { @@ -178,12 +253,18 @@ fn cpi_compressed_pda_transfer_as_program<'info>( compressed_pda.compressed_account.data = None; compressed_pda } + CreatePdaMode::UseReadOnlyAddressInAccount => { + let mut compressed_pda = compressed_pda; + compressed_pda.compressed_account.address = + Some(read_only_address.as_ref().unwrap()[0].address); + compressed_pda + } _ => compressed_pda, }; - let inputs_struct = InstructionDataInvokeCpi { + let mut inputs_struct = InstructionDataInvokeCpi { relay_fee: None, - input_compressed_accounts_with_merkle_context: Vec::new(), + input_compressed_accounts_with_merkle_context: 
input_accounts.unwrap_or_default(), output_compressed_accounts: vec![compressed_pda], proof, new_address_params: vec![new_address_params], @@ -193,34 +274,165 @@ fn cpi_compressed_pda_transfer_as_program<'info>( }; // defining seeds again so that the cpi doesn't fail we want to test the check in the compressed pda program let seeds: [&[u8]; 2] = [CPI_AUTHORITY_PDA_SEED, &[bump]]; - let mut inputs = Vec::new(); - InstructionDataInvokeCpi::serialize(&inputs_struct, &mut inputs).unwrap(); + msg!("read only address {:?}", read_only_address); + msg!("read only accounts {:?}", read_only_accounts); + if read_only_address.is_some() || read_only_accounts.is_some() { + if mode == CreatePdaMode::ReadOnlyProofOfInsertedAddress { + let read_only_address = read_only_address.as_mut().unwrap(); + read_only_address[0].address = inputs_struct.output_compressed_accounts[0] + .compressed_account + .address + .unwrap(); + } + // We currently only support two addresses hence we need to remove the + // account and address to make space for two read only addresses. 
+ if mode == CreatePdaMode::TwoReadOnlyAddresses { + inputs_struct.output_compressed_accounts = vec![]; + inputs_struct.new_address_params = vec![]; + } + let mut remaining_accounts = ctx.remaining_accounts.to_vec(); - let cpi_accounts = light_system_program::cpi::accounts::InvokeCpiInstruction { - fee_payer: ctx.accounts.signer.to_account_info(), - authority: ctx.accounts.cpi_signer.to_account_info(), - registered_program_pda: ctx.accounts.registered_program_pda.to_account_info(), - noop_program: ctx.accounts.noop_program.to_account_info(), - account_compression_authority: ctx.accounts.account_compression_authority.to_account_info(), - account_compression_program: ctx.accounts.account_compression_program.to_account_info(), - invoking_program, - sol_pool_pda: None, - decompression_recipient: None, - system_program: ctx.accounts.system_program.to_account_info(), - cpi_context_account: None, - }; + if read_only_address.is_some() { + let read_only_address = read_only_address.as_mut().unwrap(); + match mode { + CreatePdaMode::InvalidReadOnlyMerkleTree => { + remaining_accounts.push(ctx.accounts.registered_program_pda.to_account_info()); + msg!( + "read_only_address[0].address_merkle_tree_account_index {:?}", + read_only_address[0].address_merkle_tree_account_index + ); + read_only_address[0].address_merkle_tree_account_index = + (remaining_accounts.len() - 1) as u8; + msg!( + "read_only_address[0].address_merkle_tree_account_index {:?}", + read_only_address[0].address_merkle_tree_account_index + ); + } + CreatePdaMode::InvalidReadOnlyRootIndex => { + read_only_address[0].address_merkle_tree_root_index = 1; + } + CreatePdaMode::InvalidReadOnlyAddress => { + read_only_address[0].address = [0; 32]; + } + _ => {} + } + } + if read_only_accounts.is_some() { + let read_only_account = read_only_accounts.as_mut().unwrap(); + match mode { + CreatePdaMode::InvalidReadOnlyAccountMerkleTree => { + read_only_account[0].merkle_context.merkle_tree_pubkey_index = + 
read_only_account[0] + .merkle_context + .nullifier_queue_pubkey_index; + } + CreatePdaMode::InvalidReadOnlyAccountRootIndex => { + let init_value = read_only_account[0].root_index; + read_only_account[0].root_index = + read_only_account[0].root_index.saturating_sub(1); + if read_only_account[0].root_index == init_value { + read_only_account[0].root_index = + read_only_account[0].root_index.saturating_add(1); + } + } + CreatePdaMode::InvalidReadOnlyAccount => { + read_only_account[0].account_hash = [0; 32]; + } + CreatePdaMode::ProofIsNoneReadOnlyAccount => { + inputs_struct.proof = None; + } + CreatePdaMode::InvalidProofReadOnlyAccount => { + inputs_struct.proof = Some(CompressedProof::default()); + } + CreatePdaMode::InvalidReadOnlyAccountOutputQueue => { + read_only_account[0] + .merkle_context + .nullifier_queue_pubkey_index = + read_only_account[0].merkle_context.merkle_tree_pubkey_index; + } + CreatePdaMode::AccountNotInValueVecMarkedProofByIndex => { + if read_only_account[0].merkle_context.queue_index.is_some() { + panic!("Queue index shouldn't be set for mode AccountNotInValueVecMarkedProofByIndex"); + } + read_only_account[0].merkle_context.queue_index = Some(QueueIndex::default()); + } + CreatePdaMode::InvalidLeafIndex => { + read_only_account[0].merkle_context.leaf_index += 1; + } + _ => {} + } + } - let signer_seeds: [&[&[u8]]; 1] = [&seeds[..]]; + msg!("read_only_address {:?}", read_only_address); + let inputs_struct = InstructionDataInvokeCpiWithReadOnly { + invoke_cpi: inputs_struct, + read_only_addresses: read_only_address, + read_only_accounts, + }; + let mut inputs = Vec::new(); + InstructionDataInvokeCpiWithReadOnly::serialize(&inputs_struct, &mut inputs).unwrap(); - let mut cpi_ctx = CpiContext::new_with_signer( - ctx.accounts.light_system_program.to_account_info(), - cpi_accounts, - &signer_seeds, - ); + let cpi_accounts = light_system_program::cpi::accounts::InvokeCpiInstruction { + fee_payer: ctx.accounts.signer.to_account_info(), + 
authority: ctx.accounts.cpi_signer.to_account_info(), + registered_program_pda: ctx.accounts.registered_program_pda.to_account_info(), + noop_program: ctx.accounts.noop_program.to_account_info(), + account_compression_authority: ctx + .accounts + .account_compression_authority + .to_account_info(), + account_compression_program: ctx.accounts.account_compression_program.to_account_info(), + invoking_program, + sol_pool_pda: None, + decompression_recipient: None, + system_program: ctx.accounts.system_program.to_account_info(), + cpi_context_account: None, + }; - cpi_ctx.remaining_accounts = ctx.remaining_accounts.to_vec(); + let signer_seeds: [&[&[u8]]; 1] = [&seeds[..]]; - light_system_program::cpi::invoke_cpi(cpi_ctx, inputs)?; + let mut cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.light_system_program.to_account_info(), + cpi_accounts, + &signer_seeds, + ); + + cpi_ctx.remaining_accounts = remaining_accounts; + + light_system_program::cpi::invoke_cpi_with_read_only(cpi_ctx, inputs)?; + } else { + let mut inputs = Vec::new(); + InstructionDataInvokeCpi::serialize(&inputs_struct, &mut inputs).unwrap(); + + let cpi_accounts = light_system_program::cpi::accounts::InvokeCpiInstruction { + fee_payer: ctx.accounts.signer.to_account_info(), + authority: ctx.accounts.cpi_signer.to_account_info(), + registered_program_pda: ctx.accounts.registered_program_pda.to_account_info(), + noop_program: ctx.accounts.noop_program.to_account_info(), + account_compression_authority: ctx + .accounts + .account_compression_authority + .to_account_info(), + account_compression_program: ctx.accounts.account_compression_program.to_account_info(), + invoking_program, + sol_pool_pda: None, + decompression_recipient: None, + system_program: ctx.accounts.system_program.to_account_info(), + cpi_context_account: None, + }; + + let signer_seeds: [&[&[u8]]; 1] = [&seeds[..]]; + + let mut cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.light_system_program.to_account_info(), + 
cpi_accounts, + &signer_seeds, + ); + + cpi_ctx.remaining_accounts = ctx.remaining_accounts.to_vec(); + + light_system_program::cpi::invoke_cpi(cpi_ctx, inputs)?; + } Ok(()) } @@ -229,6 +441,7 @@ fn create_compressed_pda_data( ctx: &Context<'_, '_, '_, '_, CreateCompressedPda<'_>>, new_address_params: &NewAddressParamsPacked, owner_program: &Pubkey, + mode: CreatePdaMode, ) -> Result { let timelock_compressed_pda = RegisteredUser { user_pubkey: *ctx.accounts.signer.key, @@ -241,17 +454,47 @@ fn create_compressed_pda_data( .hash::() .map_err(ProgramError::from)?, }; - let derive_address = derive_address( + let mut discriminator_bytes = [0u8; 8]; + + discriminator_bytes.copy_from_slice( &ctx.remaining_accounts[new_address_params.address_merkle_tree_account_index as usize] - .key(), - &new_address_params.seed, - ) - .map_err(|_| ProgramError::InvalidArgument)?; + .try_borrow_data()?[0..8], + ); + let address = match discriminator_bytes { + AddressMerkleTreeAccount::DISCRIMINATOR => derive_address_legacy( + &ctx.remaining_accounts[new_address_params.address_merkle_tree_account_index as usize] + .key(), + &new_address_params.seed, + ) + .map_err(ProgramError::from)?, + BatchedMerkleTreeAccount::DISCRIMINATOR => derive_address( + &new_address_params.seed, + &ctx.remaining_accounts[new_address_params.address_merkle_tree_account_index as usize] + .key() + .to_bytes(), + &crate::ID.to_bytes(), + ), + _ => { + if mode == CreatePdaMode::InvalidBatchTreeAccount { + derive_address( + &new_address_params.seed, + &ctx.remaining_accounts + [new_address_params.address_merkle_tree_account_index as usize] + .key() + .to_bytes(), + &crate::ID.to_bytes(), + ) + } else { + panic!("Invalid discriminator"); + } + } + }; + Ok(OutputCompressedAccountWithPackedContext { compressed_account: CompressedAccount { - owner: *owner_program, // should be crate::ID, test provides an invalid owner + owner: *owner_program, // should be crate::ID, test can provide an invalid owner lamports: 0, - 
address: Some(derive_address), + address: Some(address), data: Some(compressed_account_data), }, merkle_tree_index: 0, diff --git a/test-programs/system-cpi-test/src/lib.rs b/test-programs/system-cpi-test/src/lib.rs index 683978d8a7..26e4359bdc 100644 --- a/test-programs/system-cpi-test/src/lib.rs +++ b/test-programs/system-cpi-test/src/lib.rs @@ -14,11 +14,13 @@ use account_compression::{ }; pub use invalidate_not_owned_account::*; use light_system_program::sdk::compressed_account::PackedCompressedAccountWithMerkleContext; +use light_system_program::sdk::compressed_account::PackedReadOnlyCompressedAccount; use light_system_program::sdk::CompressedCpiContext; +use light_system_program::PackedReadOnlyAddress; + declare_id!("FNt7byTHev1k5x2cXZLBr8TdWiC3zoP5vcnZR4P682Uy"); #[program] - pub mod system_cpi_test { use super::*; @@ -32,6 +34,9 @@ pub mod system_cpi_test { signer_is_program: CreatePdaMode, bump: u8, cpi_context: Option, + read_only_address: Option>, + read_only_accounts: Option>, + input_accounts: Option>, ) -> Result<()> { process_create_pda( ctx, @@ -42,6 +47,9 @@ pub mod system_cpi_test { cpi_context, signer_is_program, bump, + read_only_address, + read_only_accounts, + input_accounts, ) } diff --git a/test-programs/system-cpi-test/src/sdk.rs b/test-programs/system-cpi-test/src/sdk.rs index 172463bff9..1534673d07 100644 --- a/test-programs/system-cpi-test/src/sdk.rs +++ b/test-programs/system-cpi-test/src/sdk.rs @@ -13,11 +13,16 @@ use light_compressed_token::{ use light_system_program::{ invoke::processor::CompressedProof, sdk::{ - address::pack_new_address_params, - compressed_account::PackedCompressedAccountWithMerkleContext, + address::{ + pack_new_address_params, pack_read_only_accounts, pack_read_only_address_params, + }, + compressed_account::{ + CompressedAccountWithMerkleContext, PackedCompressedAccountWithMerkleContext, + ReadOnlyCompressedAccount, + }, }, utils::get_registered_program_pda, - NewAddressParams, + NewAddressParams, 
ReadOnlyAddress, }; use solana_sdk::{instruction::Instruction, pubkey::Pubkey}; @@ -34,6 +39,11 @@ pub struct CreateCompressedPdaInstructionInputs<'a> { pub owner_program: &'a Pubkey, pub signer_is_program: CreatePdaMode, pub registered_program_pda: &'a Pubkey, + pub readonly_adresses: Option>, + pub read_only_accounts: Option>, + pub input_compressed_accounts_with_merkle_context: + Option>, + pub state_roots: Option>>, } pub fn create_pda_instruction(input_params: CreateCompressedPdaInstructionInputs) -> Instruction { @@ -45,7 +55,34 @@ pub fn create_pda_instruction(input_params: CreateCompressedPdaInstructionInputs ); let new_address_params = pack_new_address_params(&[input_params.new_address_params], &mut remaining_accounts); - + let read_only_address = input_params + .readonly_adresses + .as_ref() + .map(|read_only_adresses| { + pack_read_only_address_params(read_only_adresses, &mut remaining_accounts) + }); + let read_only_accounts = input_params + .read_only_accounts + .as_ref() + .map(|read_only_accounts| { + pack_read_only_accounts(read_only_accounts, &mut remaining_accounts) + }); + let input_accounts = input_params + .input_compressed_accounts_with_merkle_context + .as_ref() + .map(|input_accounts| { + input_accounts + .iter() + .enumerate() + .map(|(i, x)| { + x.pack( + input_params.state_roots.as_ref().unwrap()[i], + &mut remaining_accounts, + ) + .unwrap() + }) + .collect::>() + }); let instruction_data = crate::instruction::CreateCompressedPda { data: input_params.data, proof: Some(input_params.proof.clone()), @@ -54,6 +91,9 @@ pub fn create_pda_instruction(input_params: CreateCompressedPdaInstructionInputs bump, signer_is_program: input_params.signer_is_program, cpi_context: None, + read_only_address, + read_only_accounts, + input_accounts, }; let compressed_token_cpi_authority_pda = get_cpi_authority_pda().0; diff --git a/test-programs/system-cpi-test/tests/test.rs b/test-programs/system-cpi-test/tests/test.rs index 2a540c9f7f..698dba52bd 100644 --- 
a/test-programs/system-cpi-test/tests/test.rs +++ b/test-programs/system-cpi-test/tests/test.rs @@ -1,5 +1,7 @@ #![cfg(feature = "test-sbf")] +use account_compression::errors::AccountCompressionErrorCode; +use account_compression::InitStateTreeAccountsInstructionData; use anchor_lang::AnchorDeserialize; use light_compressed_token::process_transfer::InputTokenDataWithContext; use light_compressed_token::token_data::AccountState; @@ -7,19 +9,23 @@ use light_hasher::{Hasher, Poseidon}; use light_program_test::test_env::{setup_test_programs_with_accounts, EnvAccounts}; use light_prover_client::gnark::helpers::{ProverConfig, ProverMode}; use light_system_program::errors::SystemProgramError; -use light_system_program::sdk::address::derive_address; +use light_system_program::sdk::address::{derive_address, derive_address_legacy}; use light_system_program::sdk::compressed_account::{ CompressedAccountWithMerkleContext, PackedCompressedAccountWithMerkleContext, PackedMerkleContext, }; use light_system_program::sdk::event::PublicTransactionEvent; use light_system_program::sdk::CompressedCpiContext; -use light_system_program::NewAddressParams; +use light_system_program::{NewAddressParams, ReadOnlyAddress}; +use light_test_utils::e2e_test_env::init_program_test_env; use light_test_utils::indexer::TestIndexer; use light_test_utils::spl::{create_mint_helper, mint_tokens_helper}; use light_test_utils::system_program::transfer_compressed_sol_test; +use light_test_utils::test_batch_forester::perform_batch_append; use light_test_utils::{assert_rpc_error, Indexer, RpcConnection, RpcError, TokenDataWithContext}; use light_utils::hash_to_bn254_field_size_be; +use light_verifier::VerifierError; +use serial_test::serial; use solana_sdk::signature::Keypair; use solana_sdk::{pubkey::Pubkey, signer::Signer, transaction::Transaction}; use system_cpi_test::sdk::{ @@ -29,6 +35,541 @@ use system_cpi_test::sdk::{ use system_cpi_test::{self, RegisteredUser, TokenTransferData, 
WithInputAccountsMode}; use system_cpi_test::{CreatePdaMode, ID}; +/// Tests: +/// 1. functional - 1 read only account proof by index +/// 2. functional - 1 read only account proof by index, 1 read only account by zkp +/// 3. functional - 10 read only account proof by index +/// 4. failing - read only account in v1 state mt +/// 5. failing - invalid read only account proof by index +/// 6. failing - invalid output queue +/// 7. failing - proof by index for invalidated account +/// 8. failing - proof is none +/// 9. failing - invalid proof +/// 10. failing - invalid root index +/// 11. failing - invalid read only account with zkp +/// 12. failing - zkp for invalidated account +/// 13. failing - invalid state mt +/// 14. failing - account marked as proof by index but index cannot be in value vec +/// 15. failing - invalid leaf index, proof by index +/// 16. functional - 4 read only accounts by zkp +/// 17. functional - 3 read only accounts by zkp 1 regular input +/// 18. functional - 1 read only account by zkp 3 regular inputs +/// +/// Read only account specific inputs: +/// struct PackedReadOnlyCompressedAccount { +/// account_hash: [u8; 32], // tested in 5 & 11 +/// merkle_context: PackedMerkleContext, +/// root_index: u16, // tested in 10 +/// } +/// +/// struct PackedMerkleContext { +/// merkle_tree_pubkey_index: u8, // tested in 13 +/// nullifier_queue_pubkey_index: u8, // tested in 6 +/// leaf_index: u32, // tested in 15 (not used with zkp) +/// queue_index: Option, // tested in 14 +///} +/// +#[serial] +#[tokio::test] +async fn test_read_only_accounts() { + let (_rpc, env) = + setup_test_programs_with_accounts(Some(vec![(String::from("system_cpi_test"), ID)])).await; + let payer = _rpc.get_payer().insecure_clone(); + let skip_prover = false; + + let mut e2e_env = init_program_test_env(_rpc, &env, skip_prover).await; + e2e_env.keypair_action_config.fee_assert = false; + // Create system state with accounts: + // - inserted a batched Merkle tree + // - inserted 
a batched output queue + // - inserted a batched output queue and batched Merkle tree + { + let params = InitStateTreeAccountsInstructionData::test_default(); + // fill two batches + for i in 0..params.output_queue_batch_size * 2 { + let seed = [i as u8; 32]; + let data = [i as u8; 31]; + perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::BatchFunctional, + ) + .await + .unwrap(); + } + println!("inserted two batches"); + // insert one batch + for _ in 0..5 { + perform_batch_append( + &mut e2e_env.rpc, + &mut e2e_env.indexer.state_merkle_trees[1], + &env.forester, + 0, + false, + None, + ) + .await + .unwrap(); + } + for i in 0..params.output_queue_zkp_batch_size { + let seed = [i as u8 + 100; 32]; + let data = [i as u8 + 100; 31]; + perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::BatchFunctional, + ) + .await + .unwrap(); + } + } + + // account in batched state mt and value vec + let account_in_value_array = e2e_env + .indexer + .get_compressed_accounts_by_owner(&ID) + .iter() + .find(|x| { + x.merkle_context.leaf_index == 101 + && x.merkle_context.merkle_tree_pubkey == env.batched_state_merkle_tree + }) + .unwrap() + .clone(); + + let account_not_in_value_array_and_in_mt = e2e_env + .indexer + .get_compressed_accounts_by_owner(&ID) + .iter() + .find(|x| { + x.merkle_context.leaf_index == 1 + && x.merkle_context.merkle_tree_pubkey == env.batched_state_merkle_tree + }) + .unwrap() + .clone(); + + // 1. 
functional - 1 read only account proof by index, an create 1 new account + { + let seed = [202u8; 32]; + let data = [2u8; 31]; + + perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_in_value_array.clone()]), + CreatePdaMode::BatchFunctional, + ) + .await + .unwrap(); + } + // 2. functional - 1 read only account proof by index, 1 read only account by zkp + { + let seed = [203u8; 32]; + let data = [3u8; 31]; + + perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![ + account_in_value_array.clone(), + account_not_in_value_array_and_in_mt.clone(), + ]), + CreatePdaMode::Functional, + ) + .await + .unwrap(); + } + // 3. functional - 10 read only account proof by index + { + let seed = [200u8; 32]; + let data = [3u8; 31]; + + perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_in_value_array.clone(); 10]), + CreatePdaMode::Functional, + ) + .await + .unwrap(); + } + let seed = [204u8; 32]; + let data = [4u8; 31]; + // 4. Failing - read only account in v1 state mt + { + let account_in_v1_tree = e2e_env + .indexer + .get_compressed_accounts_by_owner(&ID) + .iter() + .find(|x| x.merkle_context.merkle_tree_pubkey == env.merkle_tree_pubkey) + .unwrap() + .clone(); + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![ + account_in_value_array.clone(), + account_in_v1_tree.clone(), + ]), + CreatePdaMode::Functional, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidDiscriminator.into(), + ) + .unwrap(); + } + + let seed = [205u8; 32]; + let data = [5u8; 31]; + + // 5. 
Failing - invalid read only account proof by index + { + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_in_value_array.clone()]), + CreatePdaMode::InvalidReadOnlyAccount, + ) + .await; + assert_rpc_error( + result, + 0, + SystemProgramError::ReadOnlyAccountDoesNotExist.into(), + ) + .unwrap(); + } + + // 6. failing - invalid output queue + { + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_in_value_array.clone()]), + CreatePdaMode::InvalidReadOnlyAccountOutputQueue, + ) + .await; + + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidDiscriminator.into(), + ) + .unwrap(); + } + + // 7. failing - proof by index for invalidated account + { + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + Some(vec![account_in_value_array.clone()]), + Some(vec![account_in_value_array.clone()]), + CreatePdaMode::ReadOnlyProofOfInsertedAccount, + ) + .await; + assert_rpc_error( + result, + 0, + SystemProgramError::ReadOnlyAccountDoesNotExist.into(), + ) + .unwrap(); + } + // 8. failing - proof is none + { + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_not_in_value_array_and_in_mt.clone()]), + CreatePdaMode::ProofIsNoneReadOnlyAccount, + ) + .await; + assert_rpc_error(result, 0, SystemProgramError::ProofIsNone.into()).unwrap(); + } + // 9. 
failing - invalid proof + { + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_not_in_value_array_and_in_mt.clone()]), + CreatePdaMode::InvalidProofReadOnlyAccount, + ) + .await; + assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap(); + } + // 10. failing - invalid root index + { + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_not_in_value_array_and_in_mt.clone()]), + CreatePdaMode::InvalidReadOnlyAccountRootIndex, + ) + .await; + assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap(); + } + // 11. failing - invalid read only account with zkp + { + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_not_in_value_array_and_in_mt.clone()]), + CreatePdaMode::InvalidReadOnlyAccount, + ) + .await; + assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap(); + } + // 12. failing - zkp for invalidated account + { + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + Some(vec![account_not_in_value_array_and_in_mt.clone()]), + Some(vec![account_not_in_value_array_and_in_mt.clone()]), + CreatePdaMode::Functional, + ) + .await; + assert_rpc_error( + result, + 0, + SystemProgramError::ReadOnlyAccountDoesNotExist.into(), + ) + .unwrap(); + } + // 13. 
failing - invalid state mt + { + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_not_in_value_array_and_in_mt.clone()]), + CreatePdaMode::InvalidReadOnlyAccountMerkleTree, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch.into(), + ) + .unwrap(); + } + // 14. failing - account marked as proof by index but index cannot be in value vec + { + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_not_in_value_array_and_in_mt.clone()]), + CreatePdaMode::AccountNotInValueVecMarkedProofByIndex, + ) + .await; + assert_rpc_error( + result, + 0, + SystemProgramError::ReadOnlyAccountDoesNotExist.into(), + ) + .unwrap(); + } + // 15. failing - invalid leaf index, proof by index + { + let result = perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_in_value_array.clone()]), + CreatePdaMode::InvalidLeafIndex, + ) + .await; + assert_rpc_error( + result, + 0, + SystemProgramError::ReadOnlyAccountDoesNotExist.into(), + ) + .unwrap(); + } + // 16. functional - 4 read only accounts by zkp + { + perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + Some(vec![account_not_in_value_array_and_in_mt.clone(); 4]), + CreatePdaMode::Functional, + ) + .await + .unwrap(); + } + + // 17. 
functional - 3 read only accounts by zkp 1 regular input + { + let seed = [206u8; 32]; + let data = [5u8; 31]; + let input_account_in_mt = e2e_env + .indexer + .get_compressed_accounts_by_owner(&ID) + .iter() + .find(|x| { + x.merkle_context.leaf_index == 2 + && x.merkle_context.merkle_tree_pubkey == env.batched_state_merkle_tree + }) + .unwrap() + .clone(); + perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + Some(vec![input_account_in_mt.clone()]), + Some(vec![account_not_in_value_array_and_in_mt.clone(); 3]), + CreatePdaMode::Functional, + ) + .await + .unwrap(); + } + + // 18. functional - 1 read only account by zkp 3 regular inputs + { + let seed = [207u8; 32]; + let data = [5u8; 31]; + let mut input_accounts = Vec::new(); + for i in 31..34 { + let input_account_in_mt = e2e_env + .indexer + .get_compressed_accounts_by_owner(&ID) + .iter() + .find(|x| { + x.merkle_context.leaf_index == i + && x.merkle_context.merkle_tree_pubkey == env.batched_state_merkle_tree + }) + .unwrap() + .clone(); + input_accounts.push(input_account_in_mt); + } + perform_create_pda_with_event( + &mut e2e_env.indexer, + &mut e2e_env.rpc, + &env, + &payer, + seed, + &data, + &ID, + Some(input_accounts), + Some(vec![account_not_in_value_array_and_in_mt.clone()]), + CreatePdaMode::BatchFunctional, + ) + .await + .unwrap(); + } +} + /// Test: /// Functional: /// 1. Create pda @@ -46,21 +587,212 @@ use system_cpi_test::{CreatePdaMode, ID}; /// 11. write data to an account that it doesn't own (WriteAccessCheckFailed) /// 12. Spend Program owned account with program keypair (SignerCheckFailed) /// 13. 
Create program owned account without data (DataFieldUndefined) +#[serial] #[tokio::test] async fn only_test_create_pda() { let (mut rpc, env) = setup_test_programs_with_accounts(Some(vec![(String::from("system_cpi_test"), ID)])).await; let payer = rpc.get_payer().insecure_clone(); let mut test_indexer = TestIndexer::init_from_env( - &payer, - &env, - Some(ProverConfig { - run_mode: Some(ProverMode::Rpc), - circuits: vec![], - }), + &payer, &env, + // Some(ProverConfig { + // run_mode: Some(ProverMode::Rpc), + // circuits: vec![], + // }), + None, ) .await; + { + let seed = [5u8; 32]; + let data = [2u8; 31]; + + let result = perform_create_pda_with_event( + &mut test_indexer, + &mut rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::InvalidReadOnlyAddress, + ) + .await; + assert_rpc_error( + result, + 0, + light_verifier::VerifierError::ProofVerificationFailed.into(), + ) + .unwrap(); + + let result = perform_create_pda_with_event( + &mut test_indexer, + &mut rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::InvalidReadOnlyMerkleTree, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch.into(), + ) + .unwrap(); + + let result = perform_create_pda_with_event( + &mut test_indexer, + &mut rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::InvalidReadOnlyRootIndex, + ) + .await; + assert_rpc_error( + result, + 0, + light_verifier::VerifierError::ProofVerificationFailed.into(), + ) + .unwrap(); + let result = perform_create_pda_with_event( + &mut test_indexer, + &mut rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::UseReadOnlyAddressInAccount, + ) + .await; + assert_rpc_error(result, 0, SystemProgramError::InvalidAddress.into()).unwrap(); + + // The transaction inserts the address first, then checks read only addresses. 
+ let result = perform_create_pda_with_event( + &mut test_indexer, + &mut rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::ReadOnlyProofOfInsertedAddress, + ) + .await; + assert_rpc_error( + result, + 0, + SystemProgramError::ReadOnlyAddressAlreadyExists.into(), + ) + .unwrap(); + + // Functional readonly address ---------------------------------------------- + perform_create_pda_with_event( + &mut test_indexer, + &mut rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::OneReadOnlyAddress, + ) + .await + .unwrap(); + + let seed = [6u8; 32]; + let data = [2u8; 31]; + perform_create_pda_with_event( + &mut test_indexer, + &mut rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::TwoReadOnlyAddresses, + ) + .await + .unwrap(); + } + { + let seed = [3u8; 32]; + let data = [2u8; 31]; + + // Functional batch address ---------------------------------------------- + perform_create_pda_with_event( + &mut test_indexer, + &mut rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::BatchAddressFunctional, + ) + .await + .unwrap(); + + // Failing batch address double spend ---------------------------------------------- + let result = perform_create_pda_with_event( + &mut test_indexer, + &mut rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::BatchAddressFunctional, + ) + .await; + // bloom filter full + assert_rpc_error(result, 0, 16001).unwrap(); + let seed = [4u8; 32]; + println!("post bloomf filter"); + let result = perform_create_pda_with_event( + &mut test_indexer, + &mut rpc, + &env, + &payer, + seed, + &data, + &ID, + None, + None, + CreatePdaMode::InvalidBatchTreeAccount, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch.into(), + ) + .unwrap(); + } let seed = [1u8; 32]; let data = [2u8; 31]; @@ -73,6 +805,8 @@ async fn only_test_create_pda() { seed, &data, 
&ID, + None, + None, CreatePdaMode::ProgramIsSigner, ) .await @@ -293,6 +1027,7 @@ async fn only_test_create_pda() { /// 3. Freeze /// 4. Thaw /// 5. Burn +#[serial] #[tokio::test] async fn test_approve_revoke_burn_freeze_thaw_with_cpi_context() { let (mut rpc, env) = @@ -331,6 +1066,8 @@ async fn test_approve_revoke_burn_freeze_thaw_with_cpi_context() { seed, &data, &ID, + None, + None, CreatePdaMode::ProgramIsSigner, ) .await @@ -465,6 +1202,7 @@ async fn test_approve_revoke_burn_freeze_thaw_with_cpi_context() { /// 1. Cannot create an address in a program owned address Merkle tree owned by a different program (InvalidMerkleTreeOwner) /// 2. Cannot create a compressed account in a program owned state Merkle tree owned by a different program (InvalidMerkleTreeOwner) /// 3. Create a compressed account and address in program owned state and address Merkle trees +#[serial] #[tokio::test] async fn test_create_pda_in_program_owned_merkle_trees() { let (mut rpc, env) = @@ -509,6 +1247,7 @@ async fn test_create_pda_in_program_owned_merkle_trees() { batched_cpi_context: env.batched_cpi_context, batched_output_queue: env.batched_output_queue, batched_state_merkle_tree: env.batched_state_merkle_tree, + batch_address_merkle_tree: env.batch_address_merkle_tree, }; perform_create_pda_failing( @@ -558,6 +1297,7 @@ async fn test_create_pda_in_program_owned_merkle_trees() { batched_cpi_context: env.batched_cpi_context, batched_output_queue: env.batched_output_queue, batched_state_merkle_tree: env.batched_state_merkle_tree, + batch_address_merkle_tree: env.batch_address_merkle_tree, }; perform_create_pda_failing( &mut test_indexer, @@ -617,6 +1357,7 @@ async fn test_create_pda_in_program_owned_merkle_trees() { batched_cpi_context: env.batched_cpi_context, batched_output_queue: env.batched_output_queue, batched_state_merkle_tree: env.batched_state_merkle_tree, + batch_address_merkle_tree: env.batch_address_merkle_tree, }; let seed = [4u8; 32]; let data = [5u8; 31]; @@ -628,6 
+1369,8 @@ async fn test_create_pda_in_program_owned_merkle_trees() { seed, &data, &ID, + None, + None, CreatePdaMode::ProgramIsSigner, ) .await @@ -664,6 +1407,8 @@ pub async fn perform_create_pda_failing( data, payer_pubkey, owner_program, + None, + None, signer_is_program, ) .await; @@ -686,7 +1431,9 @@ pub async fn perform_create_pda_with_event( seed: [u8; 32], data: &[u8; 31], owner_program: &Pubkey, - signer_is_program: CreatePdaMode, + input_accounts: Option>, + read_only_accounts: Option>, + mode: CreatePdaMode, ) -> Result<(), RpcError> { let payer_pubkey = payer.pubkey(); let instruction = perform_create_pda( @@ -697,7 +1444,9 @@ pub async fn perform_create_pda_with_event( data, payer_pubkey, owner_program, - signer_is_program, + input_accounts, + read_only_accounts, + mode, ) .await; @@ -719,38 +1468,182 @@ async fn perform_create_pda( data: &[u8; 31], payer_pubkey: Pubkey, owner_program: &Pubkey, - signer_is_program: CreatePdaMode, + input_accounts: Option>, + read_only_accounts: Option>, + mode: CreatePdaMode, ) -> solana_sdk::instruction::Instruction { - let address = derive_address(&env.address_merkle_tree_pubkey, &seed).unwrap(); - + let output_compressed_account_merkle_tree_pubkey = if mode == CreatePdaMode::BatchFunctional { + &env.batched_output_queue + } else { + &env.merkle_tree_pubkey + }; + let (address, mut address_merkle_tree_pubkey, address_queue_pubkey) = if mode + == CreatePdaMode::BatchAddressFunctional + || mode == CreatePdaMode::InvalidReadOnlyAddress + || mode == CreatePdaMode::InvalidReadOnlyMerkleTree + || mode == CreatePdaMode::InvalidReadOnlyRootIndex + || mode == CreatePdaMode::TwoReadOnlyAddresses + || mode == CreatePdaMode::OneReadOnlyAddress + || mode == CreatePdaMode::ReadOnlyProofOfInsertedAddress + || mode == CreatePdaMode::UseReadOnlyAddressInAccount + { + let address = derive_address( + &seed, + &env.batch_address_merkle_tree.to_bytes(), + &system_cpi_test::ID.to_bytes(), + ); + println!("address: {:?}", address); + 
println!( + "address_merkle_tree_pubkey: {:?}", + env.address_merkle_tree_pubkey + ); + println!("program_id: {:?}", system_cpi_test::ID); + println!("seed: {:?}", seed); + ( + address, + env.batch_address_merkle_tree, + env.batch_address_merkle_tree, + ) + } else { + let address = derive_address_legacy(&env.address_merkle_tree_pubkey, &seed).unwrap(); + ( + address, + env.address_merkle_tree_pubkey, + env.address_merkle_tree_queue_pubkey, + ) + }; + let mut addresses = vec![address]; + let mut address_merkle_tree_pubkeys = vec![address_merkle_tree_pubkey]; + // InvalidReadOnlyAddress add address to proof but don't send in the instruction + if mode == CreatePdaMode::OneReadOnlyAddress + || mode == CreatePdaMode::InvalidReadOnlyAddress + || mode == CreatePdaMode::InvalidReadOnlyMerkleTree + || mode == CreatePdaMode::InvalidReadOnlyRootIndex + || mode == CreatePdaMode::ReadOnlyProofOfInsertedAddress + || mode == CreatePdaMode::UseReadOnlyAddressInAccount + { + let mut read_only_address = hash_to_bn254_field_size_be(&Pubkey::new_unique().to_bytes()) + .unwrap() + .0; + read_only_address[30] = 0; + read_only_address[29] = 0; + addresses.push(read_only_address); + address_merkle_tree_pubkeys.push(address_merkle_tree_pubkey); + } + if mode == CreatePdaMode::TwoReadOnlyAddresses { + let mut read_only_address = hash_to_bn254_field_size_be(&Pubkey::new_unique().to_bytes()) + .unwrap() + .0; + read_only_address[30] = 0; + read_only_address[29] = 0; + addresses.insert(0, read_only_address); + address_merkle_tree_pubkeys.push(address_merkle_tree_pubkey); + } + let mut compressed_account_hashes = Vec::new(); + let mut compressed_account_merkle_tree_pubkeys = Vec::new(); + if let Some(input_accounts) = input_accounts.as_ref() { + input_accounts.iter().for_each(|x| { + compressed_account_hashes.push(x.hash().unwrap()); + compressed_account_merkle_tree_pubkeys.push(x.merkle_context.merkle_tree_pubkey); + }); + } + if let Some(read_only_accounts) = read_only_accounts.as_ref() { + 
read_only_accounts.iter().for_each(|x| { + compressed_account_hashes.push(x.hash().unwrap()); + compressed_account_merkle_tree_pubkeys.push(x.merkle_context.merkle_tree_pubkey); + }); + } let rpc_result = test_indexer - .create_proof_for_compressed_accounts( - None, - None, - Some(&[address]), - Some(vec![env.address_merkle_tree_pubkey]), + .create_proof_for_compressed_accounts2( + if compressed_account_hashes.is_empty() { + None + } else { + Some(compressed_account_hashes) + }, + if compressed_account_merkle_tree_pubkeys.is_empty() { + None + } else { + Some(compressed_account_merkle_tree_pubkeys) + }, + Some(&addresses), + Some(address_merkle_tree_pubkeys), rpc, ) .await; - + println!("rpc_result: {:?}", rpc_result); + if mode == CreatePdaMode::InvalidBatchTreeAccount { + address_merkle_tree_pubkey = env.merkle_tree_pubkey; + } let new_address_params = NewAddressParams { seed, - address_merkle_tree_pubkey: env.address_merkle_tree_pubkey, - address_queue_pubkey: env.address_merkle_tree_queue_pubkey, + address_merkle_tree_pubkey, + address_queue_pubkey, address_merkle_tree_root_index: rpc_result.address_root_indices[0], }; + let readonly_adresses = if addresses.len() == 2 && mode != CreatePdaMode::TwoReadOnlyAddresses { + let read_only_address = vec![ReadOnlyAddress { + address: addresses[1], + address_merkle_tree_pubkey, + address_merkle_tree_root_index: rpc_result.address_root_indices[1], + }]; + Some(read_only_address) + } else if mode == CreatePdaMode::TwoReadOnlyAddresses { + let read_only_address = vec![ + ReadOnlyAddress { + address: addresses[0], + address_merkle_tree_pubkey, + address_merkle_tree_root_index: rpc_result.address_root_indices[0], + }, + ReadOnlyAddress { + address: addresses[1], + address_merkle_tree_pubkey, + address_merkle_tree_root_index: rpc_result.address_root_indices[1], + }, + ]; + Some(read_only_address) + } else { + None + }; + let mut index = 0; + let state_roots = if input_accounts.as_ref().is_none() { + None + } else { + let 
input_account_len = input_accounts.as_ref().unwrap().len(); + index += input_account_len; + Some(rpc_result.root_indices[..index].to_vec()) + }; + + let read_only_accounts = if let Some(read_only_accounts) = read_only_accounts.as_ref() { + Some( + read_only_accounts + .iter() + .map(|x| { + index += 1; + x.into_read_only(rpc_result.root_indices[index - 1]) + .unwrap() + }) + .collect::>(), + ) + } else { + None + }; + let create_ix_inputs = CreateCompressedPdaInstructionInputs { data: *data, signer: &payer_pubkey, - output_compressed_account_merkle_tree_pubkey: &env.merkle_tree_pubkey, - proof: &rpc_result.proof, + output_compressed_account_merkle_tree_pubkey, + proof: &rpc_result.proof.unwrap(), new_address_params, cpi_context_account: &env.cpi_context_account_pubkey, owner_program, - signer_is_program: signer_is_program.clone(), + signer_is_program: mode.clone(), registered_program_pda: &env.registered_program_pda, + readonly_adresses, + read_only_accounts, + input_compressed_accounts_with_merkle_context: input_accounts, + state_roots, }; - create_pda_instruction(create_ix_inputs.clone()) + create_pda_instruction(create_ix_inputs) } pub async fn assert_created_pda( @@ -766,7 +1659,7 @@ pub async fn assert_created_pda( .find(|x| x.compressed_account.owner == ID) .unwrap() .clone(); - let address = derive_address(&env.address_merkle_tree_pubkey, seed).unwrap(); + let address = derive_address_legacy(&env.address_merkle_tree_pubkey, seed).unwrap(); assert_eq!( compressed_escrow_pda.compressed_account.address.unwrap(), address diff --git a/test-programs/system-test/tests/test.rs b/test-programs/system-test/tests/test.rs index bd5951ac8f..5c320920e5 100644 --- a/test-programs/system-test/tests/test.rs +++ b/test-programs/system-test/tests/test.rs @@ -1,7 +1,9 @@ #![cfg(feature = "test-sbf")] use account_compression::batched_queue::ZeroCopyBatchedQueueAccount; use account_compression::errors::AccountCompressionErrorCode; -use 
account_compression::InitStateTreeAccountsInstructionData; +use account_compression::{ + InitAddressTreeAccountsInstructionData, InitStateTreeAccountsInstructionData, +}; use anchor_lang::error::ErrorCode; use anchor_lang::{AnchorSerialize, InstructionData, ToAccountMetas}; use light_hasher::Poseidon; @@ -13,13 +15,14 @@ use light_program_test::test_rpc::ProgramTestRpcConnection; use light_prover_client::gnark::helpers::{spawn_prover, ProofType, ProverConfig, ProverMode}; use light_registry::protocol_config::state::ProtocolConfig; use light_system_program::invoke::processor::CompressedProof; +use light_system_program::sdk::address::derive_address; use light_system_program::sdk::compressed_account::{ CompressedAccountWithMerkleContext, QueueIndex, }; use light_system_program::{ errors::SystemProgramError, sdk::{ - address::derive_address, + address::derive_address_legacy, compressed_account::{CompressedAccount, CompressedAccountData, MerkleContext}, invoke::{ create_invoke_instruction, create_invoke_instruction_data_and_remaining_accounts, @@ -482,7 +485,7 @@ pub async fn failing_transaction_inputs_inner( payer, inputs_struct, remaining_accounts.clone(), - ErrorCode::AccountDiscriminatorMismatch.into(), + AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch.into(), ) .await .unwrap(); @@ -573,7 +576,7 @@ fn create_address_test_inputs( address_merkle_tree_root_index: 0, }); let derived_address = - derive_address(&env.address_merkle_tree_pubkey, address_seed).unwrap(); + derive_address_legacy(&env.address_merkle_tree_pubkey, address_seed).unwrap(); derived_addresses.push(derived_address); } (new_address_params, derived_addresses) @@ -687,7 +690,7 @@ pub async fn failing_transaction_address( payer, inputs_struct, remaining_accounts.clone(), - ErrorCode::AccountDiscriminatorMismatch.into(), + AccountCompressionErrorCode::AddressMerkleTreeAccountDiscriminatorMismatch.into(), ) .await .unwrap(); @@ -775,7 +778,7 @@ pub async fn 
failing_transaction_output( payer, inputs_struct.clone(), remaining_accounts.clone(), - ErrorCode::AccountDiscriminatorMismatch.into(), + AccountCompressionErrorCode::StateMerkleTreeAccountDiscriminatorMismatch.into(), ) .await .unwrap(); @@ -1155,6 +1158,8 @@ async fn invoke_test() { /// Tests Execute compressed transaction with address: /// 1. should fail: create out compressed account with address without input compressed account with address or created address +/// 2. should fail: v1 address tree with v2 address derivation +/// 3. should fail: v2 address tree create address with invoke instruction (invoking program id required for derivation) /// 2. should succeed: create out compressed account with new created address /// 3. should fail: create two addresses with the same seeds /// 4. should succeed: create two addresses with different seeds @@ -1179,7 +1184,8 @@ async fn test_with_address() { let merkle_tree_pubkey = env.merkle_tree_pubkey; let address_seed = [1u8; 32]; - let derived_address = derive_address(&env.address_merkle_tree_pubkey, &address_seed).unwrap(); + let derived_address = + derive_address_legacy(&env.address_merkle_tree_pubkey, &address_seed).unwrap(); let output_compressed_accounts = vec![CompressedAccount { lamports: 0, owner: payer_pubkey, @@ -1212,6 +1218,100 @@ async fn test_with_address() { let res = context.process_transaction(transaction).await; assert_custom_error_or_program_error(res, SystemProgramError::InvalidAddress.into()).unwrap(); + // v1 address tree with new derivation should fail + { + let derived_address = derive_address( + &address_seed, + &env.batch_address_merkle_tree.to_bytes(), + &payer_pubkey.to_bytes(), + ); + let output_compressed_accounts = vec![CompressedAccount { + lamports: 0, + owner: payer_pubkey, + data: None, + address: Some(derived_address), // this should not be sent, only derived on-chain + }]; + + let address_params = vec![NewAddressParams { + seed: address_seed, + address_queue_pubkey: 
env.address_merkle_tree_queue_pubkey, + address_merkle_tree_pubkey: env.address_merkle_tree_pubkey, + address_merkle_tree_root_index: 0, + }]; + let instruction = create_invoke_instruction( + &payer_pubkey, + &payer_pubkey, + &Vec::new(), + &output_compressed_accounts, + &Vec::new(), + &[env.batched_output_queue], + &Vec::new(), + &address_params, + None, + None, + false, + None, + true, + ); + + let transaction = Transaction::new_signed_with_payer( + &[instruction], + Some(&payer_pubkey), + &[&payer], + context.get_latest_blockhash().await.unwrap(), + ); + + let res = context.process_transaction(transaction).await; + assert_custom_error_or_program_error(res, SystemProgramError::InvalidAddress.into()) + .unwrap(); + } + // batch address tree with new derivation should fail with invoke because invoking program is not provided. + { + let derived_address = derive_address( + &address_seed, + &env.batch_address_merkle_tree.to_bytes(), + &payer_pubkey.to_bytes(), + ); + let output_compressed_accounts = vec![CompressedAccount { + lamports: 0, + owner: payer_pubkey, + data: None, + address: Some(derived_address), // this should not be sent, only derived on-chain + }]; + let address_params = vec![NewAddressParams { + seed: address_seed, + address_queue_pubkey: env.batch_address_merkle_tree, + address_merkle_tree_pubkey: env.batch_address_merkle_tree, + address_merkle_tree_root_index: 0, + }]; + + let instruction = create_invoke_instruction( + &payer_pubkey, + &payer_pubkey, + &Vec::new(), + &output_compressed_accounts, + &Vec::new(), + &[env.batched_output_queue], + &Vec::new(), + &address_params, + None, + None, + false, + None, + true, + ); + + let transaction = Transaction::new_signed_with_payer( + &[instruction], + Some(&payer_pubkey), + &[&payer], + context.get_latest_blockhash().await.unwrap(), + ); + + let res = context.process_transaction(transaction).await; + assert_custom_error_or_program_error(res, SystemProgramError::DeriveAddressError.into()) + .unwrap(); + } 
println!("creating address -------------------------"); create_addresses_test( &mut context, @@ -1568,6 +1668,7 @@ async fn regenerate_accounts() { true, skip_register_programs, InitStateTreeAccountsInstructionData::test_default(), + InitAddressTreeAccountsInstructionData::test_default(), ) .await; @@ -1670,7 +1771,9 @@ async fn regenerate_accounts() { /// 8. Should fail: double-spending by index after spending by ZKP. /// 9. Should fail: double-spending by ZKP after spending by index. /// 10. Should fail: double-spending by index after spending by index. -/// 11. Should fail: double-spending by ZKP after spending by ZKP. +/// 11. Should fail: double-spending by ZKP after spending by ZKP. +/// 12. Should fail: spend account by index which is not in value vec +/// 13. Should fail: spend account v1 by zkp marked as spent by index #[serial] #[tokio::test] async fn batch_invoke_test() { @@ -1988,13 +2091,9 @@ async fn batch_invoke_test() { ]; let merkle_context_1 = compressed_account_with_context_1.merkle_context; let mut merkle_context_2 = compressed_account_with_context_2.merkle_context; - // merkle_context_2.queue_index = Some(proofs_by_index[0].1); - // Queue index is not used it is just Some to signal that the value is not in the proof - merkle_context_2.queue_index = Some(QueueIndex { - index: 123, - queue_id: 200, - }); - + // // Queue index is not used it is just Some to signal that the value is not in the proof + merkle_context_2.queue_index = Some(QueueIndex::default()); + println!("root indices {:?}", proof_rpc_result.root_indices); let instruction = create_invoke_instruction( &payer_pubkey, &payer_pubkey, @@ -2154,6 +2253,117 @@ async fn batch_invoke_test() { ) .unwrap(); } + // 12. 
spend account by zkp but mark as spent by index + { + let compressed_account_with_context_1 = test_indexer + .compressed_accounts + .iter() + .filter(|x| { + x.compressed_account.owner == payer_pubkey + && x.merkle_context.nullifier_queue_pubkey == output_queue_pubkey + }) + .last() + .unwrap() + .clone(); + + // overwrite both output queue batches -> all prior values only exist in the Merkle tree not in the output queue + for _ in 0..2 { + create_compressed_accounts_in_batch_merkle_tree( + &mut context, + &mut test_indexer, + &payer, + output_queue_pubkey, + &env, + ) + .await + .unwrap(); + } + + let proof_rpc_result = test_indexer + .create_proof_for_compressed_accounts2( + Some(vec![compressed_account_with_context_1.hash().unwrap()]), + Some(vec![ + compressed_account_with_context_1 + .merkle_context + .merkle_tree_pubkey, + ]), + None, + None, + &mut context, + ) + .await; + let mut merkle_context = compressed_account_with_context_1.merkle_context; + merkle_context.queue_index = Some(QueueIndex::default()); + let instruction = create_invoke_instruction( + &payer_pubkey, + &payer_pubkey, + &input_compressed_accounts, + &output_compressed_accounts, + &[merkle_context], + &[merkle_context.nullifier_queue_pubkey], + &[None], + &Vec::new(), + proof_rpc_result.proof, + None, + false, + None, + true, + ); + + let result = context + .create_and_send_transaction(&[instruction], &payer_pubkey, &[&payer]) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InclusionProofByIndexFailed.into(), + ) + .unwrap(); + } + // 13. 
failing - spend account v1 by zkp but mark as spent by index + // v1 accounts cannot be spent by index + { + // Selecting compressed account in v1 Merkle tree + let compressed_account_with_context_1 = test_indexer + .compressed_accounts + .iter() + .filter(|x| { + x.compressed_account.owner == payer_pubkey + && x.merkle_context.nullifier_queue_pubkey != output_queue_pubkey + }) + .last() + .unwrap() + .clone(); + + let mut merkle_context = compressed_account_with_context_1.merkle_context; + merkle_context.queue_index = Some(QueueIndex::default()); + let instruction = create_invoke_instruction( + &payer_pubkey, + &payer_pubkey, + &input_compressed_accounts, + &output_compressed_accounts, + &[merkle_context], + &[merkle_context.merkle_tree_pubkey], + &[None], + &Vec::new(), + None, + None, + false, + None, + true, + ); + + let result = context + .create_and_send_transaction(&[instruction], &payer_pubkey, &[&payer]) + .await; + // Should fail because it tries to deserialize an output queue account from a nullifier queue account + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidDiscriminator.into(), + ) + .unwrap(); + } } #[derive(Debug, PartialEq)] @@ -2272,12 +2482,14 @@ pub async fn create_compressed_accounts_in_batch_merkle_tree( for _ in 0..remaining_leaves { create_output_accounts(context, &payer, test_indexer, output_queue_pubkey, 1, true).await?; } - let bundle = test_indexer - .state_merkle_trees - .iter_mut() - .find(|x| x.accounts.nullifier_queue == output_queue_pubkey) - .unwrap(); - perform_batch_append(context, bundle, &env.forester, 0, false, None).await?; + for _ in 0..output_queue.get_account().queue.get_num_zkp_batches() { + let bundle = test_indexer + .state_merkle_trees + .iter_mut() + .find(|x| x.accounts.nullifier_queue == output_queue_pubkey) + .unwrap(); + perform_batch_append(context, bundle, &env.forester, 0, false, None).await?; + } Ok(()) } pub async fn create_output_accounts( @@ -2326,7 +2538,7 @@ pub async fn 
create_output_accounts( &[&payer], Some(TransactionParams { num_input_compressed_accounts: 0, - num_output_compressed_accounts: 1, + num_output_compressed_accounts: num_accounts as u8, num_new_addresses: 0, compress: 0, fee_config, diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 23970e4c95..de18f6bf76 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -47,6 +47,7 @@ log = "0.4" serde = { version = "1.0.197", features = ["derive"] } async-trait = "0.1.82" light-client = { workspace = true } +create-address-test-program = { path = "../test-programs/create-address-test-program", version = "1.0.0" } spl-token-2022 = { workspace = true } [dev-dependencies] diff --git a/test-utils/src/address.rs b/test-utils/src/address.rs new file mode 100644 index 0000000000..cda71b15c7 --- /dev/null +++ b/test-utils/src/address.rs @@ -0,0 +1,50 @@ +use account_compression::instruction::InsertAddresses; +use anchor_lang::{prelude::AccountMeta, system_program}; +use anchor_lang::{InstructionData, ToAccountMetas}; +use light_client::rpc::{RpcConnection, RpcError}; +use solana_sdk::signature::Signer; +use solana_sdk::{ + instruction::Instruction, pubkey::Pubkey, signature::Signature, transaction::Transaction, +}; + +pub async fn insert_addresses( + context: &mut R, + address_queue_pubkey: Pubkey, + address_merkle_tree_pubkey: Pubkey, + addresses: Vec<[u8; 32]>, +) -> Result { + let num_addresses = addresses.len(); + let instruction_data = InsertAddresses { addresses }; + let accounts = account_compression::accounts::InsertIntoQueues { + fee_payer: context.get_payer().pubkey(), + authority: context.get_payer().pubkey(), + registered_program_pda: None, + system_program: system_program::ID, + }; + let insert_ix = Instruction { + program_id: account_compression::ID, + accounts: [ + accounts.to_account_metas(Some(true)), + vec![ + vec![ + AccountMeta::new(address_queue_pubkey, false), + AccountMeta::new(address_merkle_tree_pubkey, false) + ]; + num_addresses + ] 
+ .iter() + .flat_map(|x| x.to_vec()) + .collect::>(), + ] + .concat(), + data: instruction_data.data(), + }; + let latest_blockhash = context.get_latest_blockhash().await.unwrap(); + let transaction = Transaction::new_signed_with_payer( + &[insert_ix], + Some(&context.get_payer().pubkey()), + &[&context.get_payer()], + latest_blockhash, + ); + context.process_transaction(transaction).await +} diff --git a/test-utils/src/assert_compressed_tx.rs b/test-utils/src/assert_compressed_tx.rs index 4ef53c5f5e..8c0dfcc60f 100644 --- a/test-utils/src/assert_compressed_tx.rs +++ b/test-utils/src/assert_compressed_tx.rs @@ -143,9 +143,10 @@ pub async fn assert_nullifiers_exist_in_hash_sets( .await .unwrap() .unwrap(); - let mut merkle_tree = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut merkle_tree_account.data) - .unwrap(); + let mut merkle_tree = ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut merkle_tree_account.data, + ) + .unwrap(); let mut batches = merkle_tree.batches.clone(); batches.iter_mut().enumerate().any(|(i, batch)| { batch @@ -407,9 +408,10 @@ pub async fn get_merkle_tree_snapshots( } BatchedMerkleTreeAccount::DISCRIMINATOR => { let merkle_tree_account_lamports = account_data.lamports; - let merkle_tree = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut account_data.data) - .unwrap(); + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + &mut account_data.data, + ) + .unwrap(); let queue_account_lamports = match rpc .get_account(account_bundle.nullifier_queue) .await diff --git a/test-utils/src/create_address_test_program_sdk.rs b/test-utils/src/create_address_test_program_sdk.rs new file mode 100644 index 0000000000..119d94b81b --- /dev/null +++ b/test-utils/src/create_address_test_program_sdk.rs @@ -0,0 +1,147 @@ +use account_compression::utils::constants::CPI_AUTHORITY_PDA_SEED; +use anchor_lang::{InstructionData, ToAccountMetas}; +use light_client::rpc::{RpcConnection, RpcError}; +use 
light_compressed_token::process_transfer::transfer_sdk::to_account_metas; +use std::collections::HashMap; + +use light_system_program::{ + invoke::processor::CompressedProof, + sdk::address::{derive_address, pack_new_address_params}, + NewAddressParams, +}; +use solana_sdk::{instruction::Instruction, pubkey::Pubkey, signature::Keypair, signer::Signer}; + +use crate::Indexer; +use crate::{indexer::TestIndexer, test_env::EnvAccounts}; + +#[derive(Debug, Clone)] +pub struct CreateCompressedPdaInstructionInputs<'a> { + pub data: [u8; 31], + pub signer: &'a Pubkey, + pub output_compressed_account_merkle_tree_pubkey: &'a Pubkey, + pub proof: &'a CompressedProof, + pub new_address_params: NewAddressParams, + pub registered_program_pda: &'a Pubkey, +} + +pub fn create_pda_instruction(input_params: CreateCompressedPdaInstructionInputs) -> Instruction { + let (cpi_signer, bump) = Pubkey::find_program_address( + &[CPI_AUTHORITY_PDA_SEED], + &create_address_test_program::id(), + ); + let mut remaining_accounts = HashMap::new(); + remaining_accounts.insert( + *input_params.output_compressed_account_merkle_tree_pubkey, + 0, + ); + let new_address_params = + pack_new_address_params(&[input_params.new_address_params], &mut remaining_accounts); + + let instruction_data = create_address_test_program::instruction::CreateCompressedPda { + data: input_params.data, + proof: Some(input_params.proof.clone()), + new_address_parameters: new_address_params[0], + bump, + }; + + let account_compression_authority = + light_system_program::utils::get_cpi_authority_pda(&light_system_program::ID); + + let accounts = create_address_test_program::accounts::CreateCompressedPda { + signer: *input_params.signer, + noop_program: Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY), + light_system_program: light_system_program::ID, + account_compression_program: account_compression::ID, + registered_program_pda: *input_params.registered_program_pda, + account_compression_authority, 
+ self_program: create_address_test_program::ID, + cpi_signer, + system_program: solana_sdk::system_program::id(), + }; + let remaining_accounts = to_account_metas(remaining_accounts); + + Instruction { + program_id: create_address_test_program::ID, + accounts: [accounts.to_account_metas(Some(true)), remaining_accounts].concat(), + + data: instruction_data.data(), + } +} + +pub async fn perform_create_pda_with_event_rnd( + test_indexer: &mut TestIndexer, + rpc: &mut R, + env: &EnvAccounts, + payer: &Keypair, +) -> Result<(), RpcError> { + let seed = rand::random(); + let data = rand::random(); + perform_create_pda_with_event(test_indexer, rpc, env, payer, seed, &data).await +} +pub async fn perform_create_pda_with_event( + test_indexer: &mut TestIndexer, + rpc: &mut R, + env: &EnvAccounts, + payer: &Keypair, + seed: [u8; 32], + data: &[u8; 31], +) -> Result<(), RpcError> { + let (address, address_merkle_tree_pubkey, address_queue_pubkey) = { + let address = derive_address( + &seed, + &env.batch_address_merkle_tree.to_bytes(), + &create_address_test_program::ID.to_bytes(), + ); + println!("address: {:?}", address); + println!( + "address_merkle_tree_pubkey: {:?}", + env.address_merkle_tree_pubkey + ); + println!("program_id: {:?}", create_address_test_program::ID); + println!("seed: {:?}", seed); + ( + address, + env.batch_address_merkle_tree, + env.batch_address_merkle_tree, + ) + }; + + let rpc_result = test_indexer + .create_proof_for_compressed_accounts( + None, + None, + Some(&[address]), + Some(vec![address_merkle_tree_pubkey]), + rpc, + ) + .await; + + let new_address_params = NewAddressParams { + seed, + address_merkle_tree_pubkey, + address_queue_pubkey, + address_merkle_tree_root_index: rpc_result.address_root_indices[0], + }; + let create_ix_inputs = CreateCompressedPdaInstructionInputs { + data: *data, + signer: &payer.pubkey(), + output_compressed_account_merkle_tree_pubkey: &env.merkle_tree_pubkey, + proof: &rpc_result.proof, + new_address_params, + + 
registered_program_pda: &env.registered_program_pda, + }; + let instruction = create_pda_instruction(create_ix_inputs); + let pre_test_indexer_queue_len = test_indexer.address_merkle_trees[1].queue_elements.len(); + let event = rpc + .create_and_send_transaction_with_event(&[instruction], &payer.pubkey(), &[payer], None) + .await? + .unwrap(); + let slot: u64 = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.0); + assert_eq!( + test_indexer.address_merkle_trees[1].queue_elements.len(), + pre_test_indexer_queue_len + 1 + ); + Ok(()) +} diff --git a/test-utils/src/e2e_test_env.rs b/test-utils/src/e2e_test_env.rs index 707cd97b3c..b02d6d0455 100644 --- a/test-utils/src/e2e_test_env.rs +++ b/test-utils/src/e2e_test_env.rs @@ -207,19 +207,25 @@ impl Stats { pub async fn init_program_test_env( rpc: ProgramTestRpcConnection, env_accounts: &EnvAccounts, + skip_prover: bool, ) -> E2ETestEnv> { let indexer: TestIndexer = TestIndexer::init_from_env( &env_accounts.forester.insecure_clone(), env_accounts, - Some(ProverConfig { - run_mode: None, - circuits: vec![ - ProofType::BatchAppendWithProofsTest, - ProofType::BatchUpdateTest, - ProofType::Inclusion, - ProofType::NonInclusion, - ], - }), + if skip_prover { + None + } else { + Some(ProverConfig { + run_mode: None, + circuits: vec![ + ProofType::BatchAppendWithProofsTest, + ProofType::BatchUpdateTest, + ProofType::Inclusion, + ProofType::NonInclusion, + ProofType::Combined, + ], + }) + }, ) .await; @@ -510,10 +516,11 @@ where .await .unwrap() .unwrap(); - let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut( - merkle_tree_account.data.as_mut_slice(), - ) - .unwrap(); + let merkle_tree = + ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + merkle_tree_account.data.as_mut_slice(), + ) + .unwrap(); let next_full_batch_index = merkle_tree.get_account().queue.next_full_batch_index; let batch = merkle_tree @@ -529,7 +536,7 @@ where next_full_batch_index ); 
println!("input batch_state {:?}", batch_state); - if batch_state == BatchState::ReadyToUpdateTree { + if batch_state == BatchState::Full { println!("\n --------------------------------------------------\n\t\t NULLIFYING LEAVES batched (v2)\n --------------------------------------------------"); for _ in 0..TEST_DEFAULT_BATCH_SIZE { perform_batch_nullify( @@ -572,7 +579,7 @@ where + batch.get_current_zkp_batch_index() * batch.zkp_batch_size, next_full_batch_index ); - if batch_state == BatchState::ReadyToUpdateTree { + if batch_state == BatchState::Full { for _ in 0..TEST_DEFAULT_BATCH_SIZE { perform_batch_append( &mut self.rpc, @@ -600,7 +607,13 @@ where .empty_address_queue .unwrap_or_default(), ) { - for address_merkle_tree_bundle in self.indexer.get_address_merkle_trees_mut().iter_mut() + for address_merkle_tree_bundle in self + .indexer + .get_address_merkle_trees_mut() + .iter_mut() + .filter(|x| x.accounts.merkle_tree != x.accounts.queue) + .collect::>() + .iter_mut() { // find forester which is eligible this slot for this tree if let Some(payer) = Self::get_eligible_forester_for_queue( @@ -658,10 +671,21 @@ where } } - for index in 0..self.indexer.get_address_merkle_trees().len() { + for index in 0..self + .indexer + .get_address_merkle_trees() + .iter() + .filter(|x| x.accounts.merkle_tree != x.accounts.queue) + .collect::>() + .len() + { let is_read_for_rollover = address_tree_ready_for_rollover( &mut self.rpc, - self.indexer.get_address_merkle_trees()[index] + self.indexer + .get_address_merkle_trees() + .iter() + .filter(|x| x.accounts.merkle_tree != x.accounts.queue) + .collect::>()[index] .accounts .merkle_tree, ) @@ -673,7 +697,12 @@ where { // find forester which is eligible this slot for this tree if let Some(payer) = Self::get_eligible_forester_for_queue( - &self.indexer.get_address_merkle_trees()[index] + &self + .indexer + .get_address_merkle_trees() + .iter() + .filter(|x| x.accounts.merkle_tree != x.accounts.queue) + .collect::>()[index] 
.accounts .queue, &self.foresters, @@ -1088,6 +1117,7 @@ where }, merkle_tree, indexed_array, + queue_elements: vec![], }); // TODO: Add assert } @@ -1382,7 +1412,22 @@ where amount: u64, tree_index: Option, ) { - let input_compressed_accounts = self.get_compressed_sol_accounts(&from.pubkey()); + self.compress_sol_deterministic_opt_inputs(from, amount, tree_index, true) + .await; + } + + pub async fn compress_sol_deterministic_opt_inputs( + &mut self, + from: &Keypair, + amount: u64, + tree_index: Option, + inputs: bool, + ) { + let input_compressed_accounts = if inputs { + self.get_compressed_sol_accounts(&from.pubkey()) + } else { + vec![] + }; let bundle = self.indexer.get_state_merkle_trees()[tree_index.unwrap_or(0)].clone(); let rollover_fee = bundle.rollover_fee; let output_merkle_tree = match bundle.version { @@ -1410,7 +1455,7 @@ where &mut self.rpc, &mut self.indexer, from, - input_compressed_accounts.as_slice(), + &input_compressed_accounts[..std::cmp::min(input_compressed_accounts.len(), 4)], false, amount, &output_merkle_tree, @@ -1479,13 +1524,21 @@ where if let Some(address_tree_index) = address_tree_index { ( vec![ - self.indexer.get_address_merkle_trees()[address_tree_index] + self.indexer + .get_address_merkle_trees() + .iter() + .filter(|x| x.accounts.merkle_tree != x.accounts.queue) + .collect::>()[address_tree_index] .accounts .merkle_tree; num_addresses as usize ], vec![ - self.indexer.get_address_merkle_trees()[address_tree_index] + self.indexer + .get_address_merkle_trees() + .iter() + .filter(|x| x.accounts.merkle_tree != x.accounts.queue) + .collect::>()[address_tree_index] .accounts .queue; num_addresses as usize @@ -2081,7 +2134,13 @@ where payer: &Keypair, epoch: u64, ) -> Result<(), RpcError> { - let bundle = self.indexer.get_address_merkle_trees()[index].accounts; + let bundle = self + .indexer + .get_address_merkle_trees() + .iter() + .filter(|x| x.accounts.merkle_tree != x.accounts.queue) + .collect::>()[index] + .accounts; let 
new_nullifier_queue_keypair = Keypair::new(); let new_merkle_tree_keypair = Keypair::new(); let fee_payer_balance = self @@ -2171,16 +2230,30 @@ where for _ in 0..num { let index = Self::safe_gen_range( &mut self.rng, - 0..self.indexer.get_address_merkle_trees().len(), + 0..self + .indexer + .get_address_merkle_trees() + .iter() + .filter(|x| x.accounts.merkle_tree != x.accounts.queue) + .collect::>() + .len(), 0, ); pubkeys.push( - self.indexer.get_address_merkle_trees()[index] + self.indexer + .get_address_merkle_trees() + .iter() + .filter(|x| x.accounts.merkle_tree != x.accounts.queue) + .collect::>()[index] .accounts .merkle_tree, ); queue_pubkeys.push( - self.indexer.get_address_merkle_trees()[index] + self.indexer + .get_address_merkle_trees() + .iter() + .filter(|x| x.accounts.merkle_tree != x.accounts.queue) + .collect::>()[index] .accounts .queue, ); diff --git a/test-utils/src/indexer/test_indexer.rs b/test-utils/src/indexer/test_indexer.rs index aa0b63371d..21200dec7f 100644 --- a/test-utils/src/indexer/test_indexer.rs +++ b/test-utils/src/indexer/test_indexer.rs @@ -4,7 +4,7 @@ use light_macros::pubkey; use light_prover_client::batch_append_with_proofs::get_batch_append_with_proofs_inputs; use light_prover_client::batch_append_with_subtrees::calculate_hash_chain; use light_prover_client::gnark::batch_append_with_proofs_json_formatter::BatchAppendWithProofsInputsJson; -use light_system_program::invoke::verify_state_proof::{create_tx_hash, create_tx_hash_offchain}; +use light_system_program::invoke::verify_state_proof::create_tx_hash; use light_system_program::sdk::compressed_account::QueueIndex; use log::{debug, info, warn}; use num_bigint::BigUint; @@ -98,6 +98,42 @@ pub struct TestIndexer { } impl Indexer for TestIndexer { + async fn get_queue_elements( + &self, + pubkey: [u8; 32], + batch: u64, + start_offset: u64, + end_offset: u64, + ) -> Result, IndexerError> { + let pubkey = Pubkey::new_from_array(pubkey); + let address_tree_bundle = self + 
.address_merkle_trees + .iter() + .find(|x| x.accounts.merkle_tree == pubkey); + if let Some(address_tree_bundle) = address_tree_bundle { + return Ok(address_tree_bundle.queue_elements + [start_offset as usize..end_offset as usize] + .to_vec()); + } + let state_tree_bundle = self + .state_merkle_trees + .iter() + .find(|x| x.accounts.merkle_tree == pubkey); + if let Some(state_tree_bundle) = state_tree_bundle { + return Ok(state_tree_bundle.output_queue_elements + [start_offset as usize..end_offset as usize] + .to_vec()); + } + // let state_tree_bundle = self + // .state_merkle_trees + // .iter() + // .find(|x| x.accounts.nullifier_queue == pubkey); + // if let Some(state_tree_bundle) = state_tree_bundle { + // return Ok(state_tree_bundle.input_leaf_indices[start_offset as usize..end_offset as usize].to_vec()) + // } + Err(IndexerError::Custom("Merkle tree not found".to_string())) + } + async fn get_multiple_compressed_account_proofs( &self, hashes: Vec, @@ -145,66 +181,46 @@ impl Indexer for TestIndexer { Ok(hashes) } - async fn get_multiple_new_address_proofs( + async fn get_subtrees( &self, merkle_tree_pubkey: [u8; 32], - addresses: Vec<[u8; 32]>, - ) -> Result, IndexerError> { - let mut proofs: Vec = Vec::new(); - - for address in addresses.iter() { - info!("Getting new address proof for {:?}", address); - let pubkey = Pubkey::from(merkle_tree_pubkey); - let address_tree_bundle = self - .address_merkle_trees + ) -> Result, IndexerError> { + let merkle_tree_pubkey = Pubkey::new_from_array(merkle_tree_pubkey); + let address_tree_bundle = self + .address_merkle_trees + .iter() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey); + if let Some(address_tree_bundle) = address_tree_bundle { + Ok(address_tree_bundle.merkle_tree.merkle_tree.get_subtrees()) + } else { + let state_tree_bundle = self + .state_merkle_trees .iter() - .find(|x| x.accounts.merkle_tree == pubkey) - .unwrap(); - - let address_biguint = BigUint::from_bytes_be(address.as_slice()); - let 
(old_low_address, _old_low_address_next_value) = address_tree_bundle - .indexed_array - .find_low_element_for_nonexistent(&address_biguint) - .unwrap(); - let address_bundle = address_tree_bundle - .indexed_array - .new_element_with_low_element_index(old_low_address.index, &address_biguint) - .unwrap(); - - let (old_low_address, old_low_address_next_value) = address_tree_bundle - .indexed_array - .find_low_element_for_nonexistent(&address_biguint) - .unwrap(); + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey); + if let Some(state_tree_bundle) = state_tree_bundle { + Ok(state_tree_bundle.merkle_tree.get_subtrees()) + } else { + Err(IndexerError::Custom("Merkle tree not found".to_string())) + } + } + } - // Get the Merkle proof for updating low element. - let low_address_proof = address_tree_bundle - .merkle_tree - .get_proof_of_leaf(old_low_address.index, false) - .unwrap(); + async fn get_multiple_new_address_proofs_full( + &self, + merkle_tree_pubkey: [u8; 32], + addresses: Vec<[u8; 32]>, + ) -> Result>, IndexerError> { + self._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, true) + .await + } - let low_address_index: u64 = old_low_address.index as u64; - let low_address_value: [u8; 32] = - bigint_to_be_bytes_array(&old_low_address.value).unwrap(); - let low_address_next_index: u64 = old_low_address.next_index as u64; - let low_address_next_value: [u8; 32] = - bigint_to_be_bytes_array(&old_low_address_next_value).unwrap(); - let low_address_proof: [[u8; 32]; 16] = low_address_proof.to_array().unwrap(); - let proof = NewAddressProofWithContext { - merkle_tree: merkle_tree_pubkey, - low_address_index, - low_address_value, - low_address_next_index, - low_address_next_value, - low_address_proof, - root: address_tree_bundle.merkle_tree.root(), - root_seq: address_tree_bundle.merkle_tree.merkle_tree.sequence_number as u64, - new_low_element: Some(address_bundle.new_low_element), - new_element: Some(address_bundle.new_element), - 
new_element_next_value: Some(address_bundle.new_element_next_value), - }; - proofs.push(proof); - } - Ok(proofs) + async fn get_multiple_new_address_proofs( + &self, + merkle_tree_pubkey: [u8; 32], + addresses: Vec<[u8; 32]>, + ) -> Result>, IndexerError> { + self._get_multiple_new_address_proofs(merkle_tree_pubkey, addresses, false) + .await } fn account_nullified(&mut self, merkle_tree_pubkey: Pubkey, account_hash: &str) { @@ -232,7 +248,7 @@ impl Indexer for TestIndexer { fn address_tree_updated( &mut self, merkle_tree_pubkey: Pubkey, - context: &NewAddressProofWithContext, + context: &NewAddressProofWithContext<16>, ) { info!("Updating address tree..."); let mut address_tree_bundle: &mut AddressMerkleTreeBundle = self @@ -579,6 +595,69 @@ impl Indexer for TestIndexer { } impl TestIndexer { + async fn _get_multiple_new_address_proofs( + &self, + merkle_tree_pubkey: [u8; 32], + addresses: Vec<[u8; 32]>, + full: bool, + ) -> Result>, IndexerError> { + let mut proofs: Vec> = Vec::new(); + + for address in addresses.iter() { + info!("Getting new address proof for {:?}", address); + let pubkey = Pubkey::from(merkle_tree_pubkey); + let address_tree_bundle = self + .address_merkle_trees + .iter() + .find(|x| x.accounts.merkle_tree == pubkey) + .unwrap(); + + let address_biguint = BigUint::from_bytes_be(address.as_slice()); + let (old_low_address, _old_low_address_next_value) = address_tree_bundle + .indexed_array + .find_low_element_for_nonexistent(&address_biguint) + .unwrap(); + let address_bundle = address_tree_bundle + .indexed_array + .new_element_with_low_element_index(old_low_address.index, &address_biguint) + .unwrap(); + + let (old_low_address, old_low_address_next_value) = address_tree_bundle + .indexed_array + .find_low_element_for_nonexistent(&address_biguint) + .unwrap(); + + // Get the Merkle proof for updating low element. 
+ let low_address_proof = address_tree_bundle + .merkle_tree + .get_proof_of_leaf(old_low_address.index, full) + .unwrap(); + + let low_address_index: u64 = old_low_address.index as u64; + let low_address_value: [u8; 32] = + bigint_to_be_bytes_array(&old_low_address.value).unwrap(); + let low_address_next_index: u64 = old_low_address.next_index as u64; + let low_address_next_value: [u8; 32] = + bigint_to_be_bytes_array(&old_low_address_next_value).unwrap(); + let low_address_proof: [[u8; 32]; NET_HEIGHT] = low_address_proof.to_array().unwrap(); + let proof = NewAddressProofWithContext:: { + merkle_tree: merkle_tree_pubkey, + low_address_index, + low_address_value, + low_address_next_index, + low_address_next_value, + low_address_proof, + root: address_tree_bundle.merkle_tree.root(), + root_seq: address_tree_bundle.merkle_tree.merkle_tree.sequence_number as u64, + new_low_element: Some(address_bundle.new_low_element), + new_element: Some(address_bundle.new_element), + new_element_next_value: Some(address_bundle.new_element_next_value), + }; + proofs.push(proof); + } + Ok(proofs) + } + fn count_matching_hashes(&self, query_hashes: &[String]) -> usize { self.nullified_compressed_accounts .iter() @@ -617,10 +696,16 @@ impl TestIndexer { cpi_context: env.batched_cpi_context, }, ], - vec![AddressMerkleTreeAccounts { - merkle_tree: env.address_merkle_tree_pubkey, - queue: env.address_merkle_tree_queue_pubkey, - }], + vec![ + AddressMerkleTreeAccounts { + merkle_tree: env.address_merkle_tree_pubkey, + queue: env.address_merkle_tree_queue_pubkey, + }, + AddressMerkleTreeAccounts { + merkle_tree: env.batch_address_merkle_tree, + queue: env.batch_address_merkle_tree, + }, + ], payer.insecure_clone(), env.group_pda, prover_config, @@ -706,6 +791,7 @@ impl TestIndexer { indexed_array, accounts: address_merkle_tree_accounts, rollover_fee: FeeConfig::default().address_queue_rollover as i64, + queue_elements: vec![], } } @@ -853,7 +939,7 @@ impl TestIndexer { .await .unwrap() 
.unwrap(); - let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut( + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( merkle_tree_account.data.as_mut_slice(), ) .unwrap(); @@ -874,7 +960,7 @@ impl TestIndexer { (batch_inclusion_proof_inputs, root_indices) } - async fn process_non_inclusion_proofs( + pub async fn process_non_inclusion_proofs( &self, address_merkle_tree_pubkeys: &[Pubkey], addresses: &[[u8; 32]], @@ -882,6 +968,7 @@ impl TestIndexer { ) -> (BatchNonInclusionJsonStruct, Vec) { let mut non_inclusion_proofs = Vec::new(); let mut address_root_indices = Vec::new(); + println!("process_non_inclusion_proofs: addresses {:?}", addresses); for (i, address) in addresses.iter().enumerate() { let address_tree = &self .address_merkle_trees @@ -894,16 +981,42 @@ impl TestIndexer { &address_tree.indexed_array, ); non_inclusion_proofs.push(proof_inputs); - let fetched_address_merkle_tree = unsafe { - get_indexed_merkle_tree::( - rpc, - address_merkle_tree_pubkeys[i], - ) - .await - }; - address_root_indices.push(fetched_address_merkle_tree.root_index() as u16); + // We don't have address queues in v2 (batch) address Merkle trees + // hence both accounts in this struct are the same. + let is_v2 = address_tree.accounts.merkle_tree == address_tree.accounts.queue; + println!("is v2 {:?}", is_v2); + println!( + "address_merkle_tree_pubkeys[i] {:?}", + address_merkle_tree_pubkeys[i] + ); + println!("address_tree.accounts {:?}", address_tree.accounts); + if is_v2 { + let account = rpc + .get_account(address_merkle_tree_pubkeys[i]) + .await + .unwrap(); + if let Some(mut account) = account { + let account = ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + account.data.as_mut_slice(), + ) + .unwrap(); + address_root_indices.push(account.get_root_index() as u16); + } else { + panic!( + "TestIndexer.process_non_inclusion_proofs(): Address tree account not found." 
+ ); + } + } else { + let fetched_address_merkle_tree = unsafe { + get_indexed_merkle_tree::( + rpc, + address_merkle_tree_pubkeys[i], + ) + .await + }; + address_root_indices.push(fetched_address_merkle_tree.root_index() as u16); + } } - let non_inclusion_proof_inputs = NonInclusionProofInputs(non_inclusion_proofs.as_slice()); let batch_non_inclusion_proof_inputs = BatchNonInclusionJsonStruct::from_non_inclusion_proof_inputs( @@ -965,12 +1078,14 @@ impl TestIndexer { token_compressed_accounts: &mut Vec, compressed_accounts: &mut Vec, ) { + let mut input_addresses = vec![]; if event.input_compressed_account_hashes.len() > i { - let tx_hash: [u8; 32] = create_tx_hash_offchain( + let tx_hash: [u8; 32] = create_tx_hash( &event.input_compressed_account_hashes, &event.output_compressed_account_hashes, slot, - ); + ) + .unwrap(); println!("tx_hash {:?}", tx_hash); println!("slot {:?}", slot); let hash = event.input_compressed_account_hashes[i]; @@ -990,6 +1105,9 @@ impl TestIndexer { let merkle_tree_pubkey = self.compressed_accounts[index] .merkle_context .merkle_tree_pubkey; + if let Some(address) = self.compressed_accounts[index].compressed_account.address { + input_addresses.push(address); + } self.compressed_accounts.remove(index); (leaf_index, merkle_tree_pubkey) } else { @@ -1033,8 +1151,15 @@ impl TestIndexer { .push((leaf_index, leaf_hash, tx_hash)); } } + let mut new_addresses = vec![]; if event.output_compressed_accounts.len() > i { let compressed_account = &event.output_compressed_accounts[i]; + println!("output compressed account {:?}", compressed_account); + if let Some(address) = compressed_account.compressed_account.address { + if !input_addresses.iter().any(|x| x == &address) { + new_addresses.push(address); + } + } let merkle_tree = self.state_merkle_trees.iter().find(|x| { x.accounts.merkle_tree @@ -1167,5 +1292,64 @@ impl TestIndexer { .push(event.output_compressed_account_hashes[i]); } } + println!("new addresses {:?}", new_addresses); + 
println!("event.pubkey_array {:?}", event.pubkey_array); + println!( + "address merkle trees {:?}", + self.address_merkle_trees + .iter() + .map(|x| x.accounts.merkle_tree) + .collect::>() + ); + // checks whether there are addresses in outputs which don't exist in inputs. + // if so check pubkey_array for the first address Merkle tree and append to the bundles queue elements. + // Note: + // - creating addresses in multiple address Merkle trees in one tx is not supported + // TODO: reimplement this is not a good solution + // - take addresses and address Merkle tree pubkeys from cpi to account compression program + if !new_addresses.is_empty() { + for pubkey in event.pubkey_array.iter() { + if let Some((i, address_merkle_tree)) = self + .address_merkle_trees + .iter_mut() + .enumerate() + .find(|(i, x)| x.accounts.merkle_tree == *pubkey) + { + address_merkle_tree + .queue_elements + .append(&mut new_addresses); + } + } + } + } + + pub fn finalize_batched_address_tree_update( + &mut self, + merkle_tree_pubkey: Pubkey, + batch_size: usize, + onchain_root: [u8; 32], + ) { + let address_tree = self + .address_merkle_trees + .iter_mut() + .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey) + .unwrap(); + let addresses = address_tree.queue_elements[0..batch_size].to_vec(); + for i in 0..batch_size { + address_tree.queue_elements.remove(0); + } + for new_element_value in &addresses { + address_tree + .merkle_tree + .append( + &BigUint::from_bytes_be(new_element_value), + &mut address_tree.indexed_array, + ) + .unwrap(); + } + + let new_root = address_tree.merkle_tree.root(); + assert_eq!(onchain_root, new_root); + println!("finalized batched address tree update"); } } diff --git a/test-utils/src/lib.rs b/test-utils/src/lib.rs index 645cdc6d97..dc44e9e5ba 100644 --- a/test-utils/src/lib.rs +++ b/test-utils/src/lib.rs @@ -6,6 +6,7 @@ use solana_sdk::signature::{Keypair, Signature, Signer}; use solana_sdk::{instruction::InstructionError, transaction}; use std::cmp; +pub 
mod address; pub mod address_tree_rollover; pub mod assert_address_merkle_tree; pub mod assert_compressed_tx; @@ -14,6 +15,7 @@ pub mod assert_merkle_tree; pub mod assert_queue; pub mod assert_rollover; pub mod assert_token_tx; +pub mod create_address_test_program_sdk; pub mod e2e_test_env; #[allow(unused)] pub mod indexer; @@ -23,7 +25,7 @@ pub mod system_program; #[allow(unused)] pub mod test_forester; pub mod test_batch_forester; - +pub use create_address_test_program::ID as CREATE_ADDRESS_TEST_PROGRAM_ID; use crate::assert_address_merkle_tree::assert_address_merkle_tree_initialized; use crate::assert_queue::assert_address_queue_initialized; pub use forester_utils::{ diff --git a/test-utils/src/system_program.rs b/test-utils/src/system_program.rs index 6187952219..49cbea75eb 100644 --- a/test-utils/src/system_program.rs +++ b/test-utils/src/system_program.rs @@ -1,9 +1,9 @@ use forester_utils::indexer::Indexer; use light_hasher::Poseidon; +use light_system_program::sdk::address::derive_address_legacy; use light_system_program::sdk::event::PublicTransactionEvent; use light_system_program::{ sdk::{ - address::derive_address, compressed_account::{ CompressedAccount, CompressedAccountWithMerkleContext, MerkleContext, }, @@ -42,7 +42,7 @@ pub async fn create_addresses_test>( let mut derived_addresses = Vec::new(); for (i, address_seed) in address_seeds.iter().enumerate() { let derived_address = - derive_address(&address_merkle_tree_pubkeys[i], address_seed).unwrap(); + derive_address_legacy(&address_merkle_tree_pubkeys[i], address_seed).unwrap(); println!("derived_address: {:?}", derived_address); derived_addresses.push(derived_address); } diff --git a/test-utils/src/test_batch_forester.rs b/test-utils/src/test_batch_forester.rs index 6a1144917a..7fe0e4d078 100644 --- a/test-utils/src/test_batch_forester.rs +++ b/test-utils/src/test_batch_forester.rs @@ -1,5 +1,6 @@ use account_compression::{ - assert_mt_zero_copy_inited, assert_state_mt_roll_over, + 
assert_address_mt_zero_copy_inited, assert_state_mt_roll_over, + assert_state_mt_zero_copy_inited, batched_merkle_tree::{ get_merkle_tree_account_size, AppendBatchProofInputsIx, BatchAppendEvent, BatchNullifyEvent, BatchProofInputsIx, BatchedMerkleTreeAccount, @@ -10,17 +11,24 @@ use account_compression::{ assert_queue_zero_copy_inited, get_output_queue_account_size, BatchedQueueAccount, ZeroCopyBatchedQueueAccount, }, - get_output_queue_account_default, InitStateTreeAccountsInstructionData, + get_output_queue_account_default, InitAddressTreeAccountsInstructionData, + InitStateTreeAccountsInstructionData, }; use anchor_lang::{AnchorDeserialize, AnchorSerialize}; -use forester_utils::{create_account_instruction, indexer::StateMerkleTreeBundle, AccountZeroCopy}; +use forester_utils::{ + create_account_instruction, + indexer::{Indexer, StateMerkleTreeBundle}, + AccountZeroCopy, +}; use light_client::rpc::{RpcConnection, RpcError}; use light_hasher::Poseidon; use light_prover_client::{ + batch_address_append::get_batch_address_append_circuit_inputs, batch_append_with_proofs::get_batch_append_with_proofs_inputs, batch_append_with_subtrees::calculate_hash_chain, batch_update::get_batch_update_inputs, gnark::{ + batch_address_append_json_formatter::to_json, batch_append_with_proofs_json_formatter::BatchAppendWithProofsInputsJson, batch_update_json_formatter::update_inputs_string, constants::{PROVE_PATH, SERVER_ADDRESS}, @@ -30,6 +38,7 @@ use light_prover_client::{ use light_registry::{ account_compression_cpi::sdk::{ create_batch_append_instruction, create_batch_nullify_instruction, + create_initialize_batched_address_merkle_tree_instruction, create_initialize_batched_merkle_tree_instruction, }, protocol_config::state::{ProtocolConfig, ProtocolConfigPda}, @@ -98,9 +107,10 @@ pub async fn create_append_batch_ix_data( output_queue_pubkey: Pubkey, ) -> InstructionDataBatchAppendInputs { let mut merkle_tree_account = 
rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); - let merkle_tree = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(merkle_tree_account.data.as_mut_slice()) - .unwrap(); + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + merkle_tree_account.data.as_mut_slice(), + ) + .unwrap(); let merkle_tree_next_index = merkle_tree.get_account().next_index as usize; let mut output_queue_account = rpc.get_account(output_queue_pubkey).await.unwrap().unwrap(); @@ -259,9 +269,10 @@ pub async fn get_batched_nullify_ix_data( merkle_tree_pubkey: Pubkey, ) -> Result { let mut merkle_tree_account = rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); - let merkle_tree = - ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(merkle_tree_account.data.as_mut_slice()) - .unwrap(); + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::state_tree_from_bytes_mut( + merkle_tree_account.data.as_mut_slice(), + ) + .unwrap(); let zkp_batch_size = merkle_tree.get_account().queue.zkp_batch_size; let full_batch_index = merkle_tree.get_account().queue.next_full_batch_index; let full_batch = &merkle_tree.batches[full_batch_index as usize]; @@ -499,7 +510,7 @@ pub async fn assert_registry_created_batched_state_merkle_tree params.height, params.input_queue_num_batches, ); - assert_mt_zero_copy_inited( + assert_state_mt_zero_copy_inited( merkle_tree.account.data.as_mut_slice(), ref_mt_account, params.bloom_filter_num_iters, @@ -747,3 +758,256 @@ pub async fn assert_perform_state_mt_roll_over( slot, ); } +pub async fn create_batch_address_merkle_tree( + rpc: &mut R, + payer: &Keypair, + new_address_merkle_tree_keypair: &Keypair, + address_tree_params: InitAddressTreeAccountsInstructionData, +) -> Result { + let mt_account_size = get_merkle_tree_account_size( + address_tree_params.input_queue_batch_size, + address_tree_params.bloom_filter_capacity, + address_tree_params.input_queue_zkp_batch_size, + address_tree_params.root_history_capacity, + 
address_tree_params.height, + address_tree_params.input_queue_num_batches, + ); + let mt_rent = rpc + .get_minimum_balance_for_rent_exemption(mt_account_size) + .await + .unwrap(); + let create_mt_account_ix = create_account_instruction( + &payer.pubkey(), + mt_account_size, + mt_rent, + &account_compression::ID, + Some(new_address_merkle_tree_keypair), + ); + + let instruction = create_initialize_batched_address_merkle_tree_instruction( + payer.pubkey(), + new_address_merkle_tree_keypair.pubkey(), + address_tree_params, + ); + rpc.create_and_send_transaction( + &[create_mt_account_ix, instruction], + &payer.pubkey(), + &[payer, new_address_merkle_tree_keypair], + ) + .await +} + +pub async fn assert_registry_created_batched_address_merkle_tree( + rpc: &mut R, + payer_pubkey: Pubkey, + merkle_tree_pubkey: Pubkey, + params: InitAddressTreeAccountsInstructionData, +) -> Result<(), RpcError> { + let mut merkle_tree = + AccountZeroCopy::::new(rpc, merkle_tree_pubkey).await; + + let mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + let mt_rent = rpc + .get_minimum_balance_for_rent_exemption(mt_account_size) + .await + .unwrap(); + let ref_mt_account = BatchedMerkleTreeAccount::get_address_tree_default( + payer_pubkey, + params.program_owner, + params.forester, + params.rollover_threshold, + params.index, + params.network_fee.unwrap_or_default(), + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + params.bloom_filter_capacity, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + mt_rent, + ); + assert_address_mt_zero_copy_inited( + merkle_tree.account.data.as_mut_slice(), + ref_mt_account, + params.bloom_filter_num_iters, + ); + + Ok(()) +} + +pub async fn create_batch_update_address_tree_instruction_data_with_proof< + R: 
RpcConnection, + I: Indexer, +>( + rpc: &mut R, + indexer: &I, + merkle_tree_pubkey: Pubkey, +) -> Result { + let mut merkle_tree_account = rpc.get_account(merkle_tree_pubkey).await?.unwrap(); + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut( + merkle_tree_account.data.as_mut_slice(), + ) + .unwrap(); + let old_root_index = merkle_tree.root_history.last_index(); + let full_batch_index = merkle_tree.get_account().queue.next_full_batch_index; + let batch = &merkle_tree.batches[full_batch_index as usize]; + let zkp_batch_index = batch.get_num_inserted_zkps(); + let leaves_hashchain = + merkle_tree.hashchain_store[full_batch_index as usize][zkp_batch_index as usize]; + let batch_start_index = batch.start_index; + + let addresses = indexer + .get_queue_elements( + merkle_tree_pubkey.to_bytes(), + full_batch_index, + 0, + batch.batch_size, + ) + .await + .unwrap(); + println!("addresses {:?}", addresses); + // // local_leaves_hashchain is only used for a test assertion. 
+ // let local_nullifier_hashchain = calculate_hash_chain(&addresses); + // assert_eq!(leaves_hashchain, local_nullifier_hashchain); + let start_index = merkle_tree.get_account().next_index as usize; + assert!( + start_index >= 2, + "start index should be greater than 2 else tree is not inited" + ); + let current_root = *merkle_tree.root_history.last().unwrap(); + println!("addresses {:?}", addresses); + let mut low_element_values = Vec::new(); + let mut low_element_indices = Vec::new(); + let mut low_element_next_indices = Vec::new(); + let mut low_element_next_values = Vec::new(); + let mut low_element_proofs: Vec> = Vec::new(); + let non_inclusion_proofs = indexer + .get_multiple_new_address_proofs_full(merkle_tree_pubkey.to_bytes(), addresses.clone()) + .await + .unwrap(); + for non_inclusion_proof in &non_inclusion_proofs { + low_element_values.push(non_inclusion_proof.low_address_value); + low_element_indices.push(non_inclusion_proof.low_address_index as usize); + low_element_next_indices.push(non_inclusion_proof.low_address_next_index as usize); + low_element_next_values.push(non_inclusion_proof.low_address_next_value); + + low_element_proofs.push(non_inclusion_proof.low_address_proof.to_vec()); + } + + let inputs = get_batch_address_append_circuit_inputs::<26>( + start_index, + current_root, + low_element_values, + low_element_next_values, + low_element_indices, + low_element_next_indices, + low_element_proofs, + addresses, + indexer + .get_subtrees(merkle_tree_pubkey.to_bytes()) + .await + .unwrap() + .try_into() + .unwrap(), + leaves_hashchain, + batch_start_index as usize, + batch.zkp_batch_size as usize, + ); + let client = Client::new(); + let circuit_inputs_new_root = bigint_to_be_bytes_array::<32>(&inputs.new_root).unwrap(); + let inputs = to_json(&inputs); + + let response_result = client + .post(&format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) + .header("Content-Type", "text/plain; charset=utf-8") + .body(inputs) + .send() + .await + .expect("Failed 
to execute request."); + + if response_result.status().is_success() { + let body = response_result.text().await.unwrap(); + let proof_json = deserialize_gnark_proof_json(&body).unwrap(); + let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); + let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); + let instruction_data = InstructionDataBatchNullifyInputs { + public_inputs: BatchProofInputsIx { + new_root: circuit_inputs_new_root, + old_root_index: old_root_index as u16, + }, + compressed_proof: CompressedProof { + a: proof_a, + b: proof_b, + c: proof_c, + }, + }; + Ok(instruction_data) + } else { + Err(RpcError::CustomError( + "Prover failed to generate proof".to_string(), + )) + } +} + +pub async fn perform_rollover_batch_address_merkle_tree( + rpc: &mut R, + forester: &Keypair, + derivation_pubkey: Pubkey, + old_merkle_tree_pubkey: Pubkey, + new_address_merkle_tree_keypair: &Keypair, + epoch: u64, +) -> Result<(Signature, Pubkey), RpcError> { + let payer_pubkey = forester.pubkey(); + let mut account = rpc.get_account(old_merkle_tree_pubkey).await?.unwrap(); + let old_merkle_tree = + ZeroCopyBatchedMerkleTreeAccount::address_tree_from_bytes_mut(account.data.as_mut_slice()) + .unwrap(); + let batch_zero = &old_merkle_tree.batches[0]; + let num_batches = old_merkle_tree.batches.len(); + let old_merkle_tree = old_merkle_tree.get_account(); + let mt_account_size = get_merkle_tree_account_size( + batch_zero.batch_size, + batch_zero.bloom_filter_capacity, + batch_zero.zkp_batch_size, + old_merkle_tree.root_history_capacity, + old_merkle_tree.height, + num_batches as u64, + ); + + let mt_rent = rpc + .get_minimum_balance_for_rent_exemption(mt_account_size) + .await + .unwrap(); + let create_mt_account_ix = create_account_instruction( + &payer_pubkey, + mt_account_size, + mt_rent, + &account_compression::ID, + Some(new_address_merkle_tree_keypair), + ); + + let instruction = 
light_registry::account_compression_cpi::sdk::create_rollover_batch_address_tree_instruction( + forester.pubkey(), + derivation_pubkey, + old_merkle_tree_pubkey, + new_address_merkle_tree_keypair.pubkey(), + epoch, + ); + + Ok(( + rpc.create_and_send_transaction( + &[create_mt_account_ix, instruction], + &payer_pubkey, + &[forester, new_address_merkle_tree_keypair], + ) + .await?, + new_address_merkle_tree_keypair.pubkey(), + )) +} diff --git a/test-utils/src/test_forester.rs b/test-utils/src/test_forester.rs index f2047787fe..750765c0ab 100644 --- a/test-utils/src/test_forester.rs +++ b/test-utils/src/test_forester.rs @@ -653,45 +653,3 @@ pub async fn update_merkle_tree( ) .await } - -pub async fn insert_addresses( - context: &mut R, - address_queue_pubkey: Pubkey, - address_merkle_tree_pubkey: Pubkey, - addresses: Vec<[u8; 32]>, -) -> Result { - let num_addresses = addresses.len(); - let instruction_data = InsertAddresses { addresses }; - let accounts = account_compression::accounts::InsertIntoQueues { - fee_payer: context.get_payer().pubkey(), - authority: context.get_payer().pubkey(), - registered_program_pda: None, - system_program: system_program::ID, - }; - let insert_ix = Instruction { - program_id: ID, - accounts: [ - accounts.to_account_metas(Some(true)), - vec![ - vec![ - AccountMeta::new(address_queue_pubkey, false), - AccountMeta::new(address_merkle_tree_pubkey, false) - ]; - num_addresses - ] - .iter() - .flat_map(|x| x.to_vec()) - .collect::>(), - ] - .concat(), - data: instruction_data.data(), - }; - let latest_blockhash = context.get_latest_blockhash().await.unwrap(); - let transaction = Transaction::new_signed_with_payer( - &[insert_ix], - Some(&context.get_payer().pubkey()), - &[&context.get_payer()], - latest_blockhash, - ); - context.process_transaction(transaction).await -}