diff --git a/Cargo.lock b/Cargo.lock index 301eae03fd..71d3c67711 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10519,6 +10519,7 @@ name = "xtask" version = "1.1.0" dependencies = [ "account-compression", + "anchor-lang", "anyhow", "ark-bn254 0.5.0", "ark-ff 0.5.0", diff --git a/program-libs/batched-merkle-tree/src/initialize_address_tree.rs b/program-libs/batched-merkle-tree/src/initialize_address_tree.rs index ce513ad8c9..0313967159 100644 --- a/program-libs/batched-merkle-tree/src/initialize_address_tree.rs +++ b/program-libs/batched-merkle-tree/src/initialize_address_tree.rs @@ -7,6 +7,7 @@ use light_merkle_tree_metadata::{ use crate::{ constants::{ + ADDRESS_BLOOM_FILTER_CAPACITY, ADDRESS_BLOOM_FILTER_NUM_HASHES, DEFAULT_ADDRESS_ZKP_BATCH_SIZE, DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_ROOT_HISTORY_LEN, DEFAULT_BATCH_SIZE, }, @@ -39,12 +40,12 @@ impl Default for InitAddressTreeAccountsInstructionData { index: 0, program_owner: None, forester: None, - bloom_filter_num_iters: 3, + bloom_filter_num_iters: ADDRESS_BLOOM_FILTER_NUM_HASHES, input_queue_batch_size: DEFAULT_BATCH_SIZE, input_queue_zkp_batch_size: DEFAULT_ADDRESS_ZKP_BATCH_SIZE, height: 40, root_history_capacity: DEFAULT_BATCH_ROOT_HISTORY_LEN, - bloom_filter_capacity: DEFAULT_BATCH_SIZE * 8, + bloom_filter_capacity: ADDRESS_BLOOM_FILTER_CAPACITY, network_fee: Some(5000), rollover_threshold: Some(95), close_threshold: None, @@ -182,12 +183,12 @@ pub mod test_utils { index: 0, program_owner: None, forester: None, - bloom_filter_num_iters: 3, + bloom_filter_num_iters: ADDRESS_BLOOM_FILTER_NUM_HASHES, input_queue_batch_size: TEST_DEFAULT_BATCH_SIZE, input_queue_zkp_batch_size: TEST_DEFAULT_ZKP_BATCH_SIZE, height: 40, root_history_capacity: DEFAULT_BATCH_ROOT_HISTORY_LEN, - bloom_filter_capacity: 20_000 * 8, + bloom_filter_capacity: ADDRESS_BLOOM_FILTER_CAPACITY, network_fee: Some(5000), rollover_threshold: Some(95), close_threshold: None, @@ -199,12 +200,12 @@ pub mod test_utils { index: 0, 
program_owner: None, forester: None, - bloom_filter_num_iters: 3, + bloom_filter_num_iters: ADDRESS_BLOOM_FILTER_NUM_HASHES, input_queue_batch_size: 500, input_queue_zkp_batch_size: TEST_DEFAULT_ZKP_BATCH_SIZE, height: 40, root_history_capacity: DEFAULT_BATCH_ROOT_HISTORY_LEN, - bloom_filter_capacity: 20_000 * 8, + bloom_filter_capacity: ADDRESS_BLOOM_FILTER_CAPACITY, network_fee: Some(5000), rollover_threshold: Some(95), close_threshold: None, diff --git a/program-libs/batched-merkle-tree/src/initialize_state_tree.rs b/program-libs/batched-merkle-tree/src/initialize_state_tree.rs index 55b1f734bb..f8e30e1043 100644 --- a/program-libs/batched-merkle-tree/src/initialize_state_tree.rs +++ b/program-libs/batched-merkle-tree/src/initialize_state_tree.rs @@ -8,8 +8,9 @@ use light_merkle_tree_metadata::{ use crate::{ constants::{ - DEFAULT_BATCH_SIZE, DEFAULT_BATCH_STATE_TREE_HEIGHT, DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, - DEFAULT_ZKP_BATCH_SIZE, + ADDRESS_BLOOM_FILTER_CAPACITY, ADDRESS_BLOOM_FILTER_NUM_HASHES, + DEFAULT_BATCH_ROOT_HISTORY_LEN, DEFAULT_BATCH_SIZE, DEFAULT_BATCH_STATE_TREE_HEIGHT, + DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, DEFAULT_ZKP_BATCH_SIZE, }, errors::BatchedMerkleTreeError, merkle_tree::{get_merkle_tree_account_size, BatchedMerkleTreeAccount}, @@ -48,14 +49,14 @@ impl Default for InitStateTreeAccountsInstructionData { program_owner: None, forester: None, additional_bytes: DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, - bloom_filter_num_iters: 3, + bloom_filter_num_iters: ADDRESS_BLOOM_FILTER_NUM_HASHES, input_queue_batch_size: DEFAULT_BATCH_SIZE, output_queue_batch_size: DEFAULT_BATCH_SIZE, input_queue_zkp_batch_size: DEFAULT_ZKP_BATCH_SIZE, output_queue_zkp_batch_size: DEFAULT_ZKP_BATCH_SIZE, height: DEFAULT_BATCH_STATE_TREE_HEIGHT, - root_history_capacity: (DEFAULT_BATCH_SIZE / DEFAULT_ZKP_BATCH_SIZE * 2) as u32, - bloom_filter_capacity: DEFAULT_BATCH_SIZE * 8, + root_history_capacity: DEFAULT_BATCH_ROOT_HISTORY_LEN, + bloom_filter_capacity: 
ADDRESS_BLOOM_FILTER_CAPACITY, network_fee: Some(5000), rollover_threshold: Some(95), close_threshold: None, @@ -259,7 +260,7 @@ pub mod test_utils { program_owner: None, forester: None, additional_bytes: DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, - bloom_filter_num_iters: 3, + bloom_filter_num_iters: ADDRESS_BLOOM_FILTER_NUM_HASHES, input_queue_batch_size: TEST_DEFAULT_BATCH_SIZE, output_queue_batch_size: TEST_DEFAULT_BATCH_SIZE, input_queue_zkp_batch_size: TEST_DEFAULT_ZKP_BATCH_SIZE, @@ -279,7 +280,7 @@ pub mod test_utils { program_owner: None, forester: None, additional_bytes: DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, - bloom_filter_num_iters: 3, + bloom_filter_num_iters: ADDRESS_BLOOM_FILTER_NUM_HASHES, input_queue_batch_size: 500, output_queue_batch_size: 500, input_queue_zkp_batch_size: TEST_DEFAULT_ZKP_BATCH_SIZE, diff --git a/scripts/create-batched-state-trees.sh b/scripts/create-batched-state-trees.sh new file mode 100755 index 0000000000..21f623889c --- /dev/null +++ b/scripts/create-batched-state-trees.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Base directory for keypairs +KEYPAIR_DIR="../light-keypairs/batched-tree-keypairs" + +# Command template +CMD_TEMPLATE="cargo xtask create-batch-state-tree --mt-pubkey {SMT} --nfq-pubkey {NFQ} --cpi-pubkey {CPI} --index {INDEX} --network devnet" + +# Collect sorted key files for each type +SMT_KEYS=($(ls $KEYPAIR_DIR/bmt*.json | sort)) +NFQ_KEYS=($(ls $KEYPAIR_DIR/oq*.json | sort)) +CPI_KEYS=($(ls $KEYPAIR_DIR/cpi*.json | sort)) + +# Ensure equal number of keys for each type +if [[ ${#SMT_KEYS[@]} -ne ${#NFQ_KEYS[@]} || ${#NFQ_KEYS[@]} -ne ${#CPI_KEYS[@]} ]]; then + echo "Error: Mismatched number of SMT, NFQ, and CPI key files." 
+ exit 1 +fi + +# Execute the command for each triple +for i in "${!SMT_KEYS[@]}"; do + SMT_KEY="${SMT_KEYS[i]}" + NFQ_KEY="${NFQ_KEYS[i]}" + CPI_KEY="${CPI_KEYS[i]}" + INDEX=$((i + 30)) + + # Replace placeholders in the command template + CMD=${CMD_TEMPLATE//\{SMT\}/"$SMT_KEY"} + CMD=${CMD//\{NFQ\}/"$NFQ_KEY"} + CMD=${CMD//\{CPI\}/"$CPI_KEY"} + CMD=${CMD//\{INDEX\}/"$INDEX"} + + echo "Executing: $CMD" + eval "$CMD" + +done + +echo "All batch state trees created." + +# Create batch address tree using the first amt keypair +echo "Creating batch address tree..." +AMT_KEY="$(ls $KEYPAIR_DIR/amt*.json | sort | head -n 1)" +ADDR_CMD="cargo xtask create-batch-address-tree --mt-pubkey $AMT_KEY --network devnet" +echo "Executing: $ADDR_CMD" +eval "$ADDR_CMD" + +echo "All commands executed." diff --git a/scripts/create-state-trees.sh b/scripts/create-state-trees.sh old mode 100755 new mode 100644 diff --git a/scripts/deploy-devnet-upgrade.sh b/scripts/deploy-devnet-upgrade.sh new file mode 100755 index 0000000000..80573a6dee --- /dev/null +++ b/scripts/deploy-devnet-upgrade.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# assumes that programs have been built with build-verifiable.sh +# Creates buffer accounts +# Buffer account addresses can be used in multisig action + +# Array of program names +libraries=("account_compression" "light_compressed_token" "light_system_program_pinocchio" "light_registry") +program_ids=("compr6CUsB5m2jS4Y3831ztGSTnDpnKJTKS95d64XVq" "cTokenmWW8bLPjZEBAUgYy3zKxQZW6VKi7bqNFEVv3m" "SySTEM1eSU2p4BGQfQpimFEWWSC1XDFeun3Nqzz3rT7" "Lighton6oQpVkeewmo2mcPTQQp7kYHr4fWpAgJyEmDX") + +BUFFER_KEYPAIR_PATH="target/buffer" + + +create_buffer_account() { + local max_retries=5 + local attempt=1 + + local program_name="$1" + local program_id="$2" + + while (( attempt <= max_retries )); do + echo "Attempt $attempt of $max_retries..."
+ echo "$BUFFER_KEYPAIR_PATH/$program_name-keypair.json" + echo "Program ID for $program_name: $program_id" + if solana program deploy target/deploy/"$program_name".so --program-id $program_id --buffer "$BUFFER_KEYPAIR_PATH/$program_name-keypair.json" --upgrade-authority ../../.config/solana/id.json; then + echo "Command succeeded on attempt $attempt." + return 0 + else + echo "Command failed on attempt $attempt." + ((attempt++)) + sleep 2 + fi + done + + echo "Command failed after $max_retries attempts." + return 1 +} + + + +# Iterate over each program and create buffer accounts +for i in "${!program_ids[@]}"; do + program_id="${program_ids[$i]}" + program_name="${libraries[$i]}" + + if [[ ! -f "$BUFFER_KEYPAIR_PATH/$program_name-keypair.json" ]]; then + solana-keygen new --outfile "$BUFFER_KEYPAIR_PATH/$program_name-keypair.json" --no-bip39-passphrase + fi + create_buffer_account "$program_name" "$program_id" +done diff --git a/scripts/install.sh b/scripts/install.sh old mode 100755 new mode 100644 diff --git a/sdk-libs/program-test/src/accounts/state_tree_v2.rs b/sdk-libs/program-test/src/accounts/state_tree_v2.rs index b3c4bb6184..77b48bc0fd 100644 --- a/sdk-libs/program-test/src/accounts/state_tree_v2.rs +++ b/sdk-libs/program-test/src/accounts/state_tree_v2.rs @@ -89,7 +89,7 @@ pub async fn create_batched_state_merkle_tree( data: instruction.data(), } }; - + println!("instruction {:?}", instruction); rpc.create_and_send_transaction( &[ create_mt_account_ix, diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index 8258907b2f..0e900e11e7 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -33,3 +33,4 @@ solana-client = { workspace = true } solana-transaction-status = { workspace = true } light-batched-merkle-tree = { workspace = true } light-registry = { workspace = true } +anchor-lang = { workspace = true } diff --git a/xtask/src/create_batch_address_tree.rs b/xtask/src/create_batch_address_tree.rs index 1cde15c0c0..148fe1f0aa 100644 --- 
a/xtask/src/create_batch_address_tree.rs +++ b/xtask/src/create_batch_address_tree.rs @@ -91,6 +91,7 @@ pub async fn create_batch_address_tree(options: Options) -> anyhow::Result<()> { "creating address Merkle tree: \n\tmt {:?}", merkle_tree_keypair.pubkey(), ); + println!("config {:?}", config); let balance = rpc.get_balance(&payer.pubkey()).await.unwrap(); println!("Payer balance: {:?}", balance); let tx_hash = diff --git a/xtask/src/create_batch_state_tree.rs b/xtask/src/create_batch_state_tree.rs index 7afb0b411b..d98e3aea23 100644 --- a/xtask/src/create_batch_state_tree.rs +++ b/xtask/src/create_batch_state_tree.rs @@ -104,7 +104,7 @@ pub async fn create_batch_state_tree(options: Options) -> anyhow::Result<()> { }; println!("read payer: {:?}", payer.pubkey()); - let config = if let Some(config) = options.config { + let mut config = if let Some(config) = options.config { if config == "testnet" { InitStateTreeAccountsInstructionData::testnet_default() } else { @@ -113,6 +113,7 @@ pub async fn create_batch_state_tree(options: Options) -> anyhow::Result<()> { } else { InitStateTreeAccountsInstructionData::default() }; + config.index = options.index as u64; for ((merkle_tree_keypair, nullifier_queue_keypair), cpi_context_keypair) in mt_keypairs .iter() @@ -126,6 +127,7 @@ pub async fn create_batch_state_tree(options: Options) -> anyhow::Result<()> { cpi_context_keypair.pubkey(), options.index ); + println!("config {:?}", config); let balance = rpc.get_balance(&payer.pubkey()).await.unwrap(); println!("Payer balance: {:?}", balance); let tx_hash = create_batched_state_merkle_tree( diff --git a/xtask/src/main.rs b/xtask/src/main.rs index df08d63a57..4e4b36cea2 100644 --- a/xtask/src/main.rs +++ b/xtask/src/main.rs @@ -10,6 +10,7 @@ mod export_photon_test_data; mod fee; mod hash_set; mod new_deployment; +mod resize_registered_program_pda; mod type_sizes; mod utils; mod zero_bytes; @@ -57,6 +58,9 @@ enum Command { InitNewDeployment(new_deployment::Options), /// cargo 
xtask create-update-protocol-config --slot-length CreateUpdateProtocolConfigIx(create_update_protocol_config_ix::Options), + /// Resize registered program PDA to match current program structure + /// Example: cargo xtask resize-registered-program-pda --network devnet + ResizeRegisteredProgramPda(resize_registered_program_pda::Options), } #[tokio::main] @@ -89,5 +93,8 @@ async fn main() -> Result<(), anyhow::Error> { Command::CreateUpdateProtocolConfigIx(opts) => { create_update_protocol_config_ix::create_update_protocol_config_ix(opts).await } + Command::ResizeRegisteredProgramPda(opts) => { + resize_registered_program_pda::resize_registered_program_pda(opts).await + } } } diff --git a/xtask/src/resize_registered_program_pda.rs b/xtask/src/resize_registered_program_pda.rs new file mode 100644 index 0000000000..461d2d5b94 --- /dev/null +++ b/xtask/src/resize_registered_program_pda.rs @@ -0,0 +1,115 @@ +use std::{path::PathBuf, str::FromStr}; + +use anchor_lang::{InstructionData, ToAccountMetas}; +use clap::Parser; +use dirs::home_dir; +use light_client::rpc::{LightClient, LightClientConfig, Rpc}; +use solana_sdk::{ + instruction::Instruction, + signature::{read_keypair_file, Signer}, +}; + +#[derive(Debug, Parser)] +pub struct Options { + #[clap(long)] + payer: Option, + /// mainnet, devnet, local, default: local + #[clap(long)] + network: Option, + /// mainnet, testnet + #[clap(long)] + config: Option, +} + +pub async fn resize_registered_program_pda(options: Options) -> anyhow::Result<()> { + let rpc_url = if let Some(network) = options.network { + if network == "local" { + String::from("http://127.0.0.1:8899") + } else if network == "devnet" { + String::from("https://api.devnet.solana.com") + } else if network == "mainnet" { + String::from("https://api.mainnet-beta.solana.com") + } else { + network.to_string() + } + } else { + String::from("http://127.0.0.1:8899") + }; + + let mut rpc = LightClient::new(LightClientConfig { + url: rpc_url, + photon_url: None, + 
commitment_config: None, + fetch_active_tree: false, + api_key: None, + }) + .await + .unwrap(); + + let payer = if let Some(payer_path) = options.payer { + read_keypair_file(payer_path).expect("Failed to read payer keypair") + } else { + let home_dir = home_dir().unwrap(); + let payer_path = home_dir.join(".config/solana/id.json"); + read_keypair_file(payer_path).expect("Failed to read payer keypair") + }; + + // Programs to resize + let programs_to_resize = vec![ + ( + "Light System Program", + solana_sdk::pubkey::Pubkey::from_str("SySTEM1eSU2p4BGQfQpimFEWWSC1XDFeun3Nqzz3rT7") + .unwrap(), + ), + ( + "Light Registry Program", + solana_sdk::pubkey::Pubkey::from_str("Lighton6oQpVkeewmo2mcPTQQp7kYHr4fWpAgJyEmDX") + .unwrap(), + ), + ]; + + for (program_name, program_id) in programs_to_resize { + println!("Resizing registered program PDA for {}", program_name); + println!("Program ID: {}", program_id); + + // Calculate the registered program PDA + let registered_program_pda = solana_sdk::pubkey::Pubkey::find_program_address( + &[program_id.to_bytes().as_slice()], + &account_compression::ID, + ) + .0; + + println!("Registered program PDA: {}", registered_program_pda); + + let instruction_data = account_compression::instruction::ResizeRegisteredProgramPda {}; + let accounts = account_compression::accounts::ResizeRegisteredProgramPda { + authority: payer.pubkey(), + registered_program_pda, + system_program: solana_sdk::system_program::ID, + }; + + let instruction = Instruction { + program_id: account_compression::ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + }; + + println!("Sending resize transaction for {}...", program_name); + match rpc + .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer]) + .await + { + Ok(signature) => { + println!("✓ Successfully resized {} PDA!", program_name); + println!(" Transaction signature: {}", signature); + } + Err(e) => { + println!("✗ Failed to resize {} PDA: {}", 
program_name, e); + // Continue with the next program instead of failing entirely + } + } + println!(); + } + + Ok(()) +}