
Commit

Merge branch 'master' into remove-lamports-per-sig-param
tao-stones authored Jan 25, 2024
2 parents 6241072 + f0d67d7 commit 595eac2
Showing 26 changed files with 1,196 additions and 255 deletions.
21 changes: 21 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -139,6 +139,7 @@ aquamarine = "0.3.3"
aes-gcm-siv = "0.10.3"
ahash = "0.8.7"
anyhow = "1.0.79"
arbitrary = "1.3.2"
ark-bn254 = "0.4.0"
ark-ec = "0.4.0"
ark-ff = "0.4.0"
88 changes: 85 additions & 3 deletions accounts-cluster-bench/src/main.rs
@@ -509,6 +509,7 @@ fn run_accounts_bench(
close_nth_batch: u64,
maybe_lamports: Option<u64>,
num_instructions: usize,
max_accounts: Option<usize>,
mint: Option<Pubkey>,
reclaim_accounts: bool,
rpc_benches: Option<Vec<RpcBench>>,
@@ -682,14 +683,30 @@ fn run_accounts_bench(
}

count += 1;
if last_log.elapsed().as_millis() > 3000 || (count >= iterations && iterations != 0) {
let max_accounts_met = if let Some(max_accounts) = max_accounts {
total_accounts_created >= max_accounts
} else {
false
};
if last_log.elapsed().as_millis() > 3000
|| (count >= iterations && iterations != 0)
|| max_accounts_met
{
info!(
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
);
last_log = Instant::now();
}
if iterations != 0 && count >= iterations {
info!("{iterations} iterations reached");
break;
}
if max_accounts_met {
info!(
"Max account limit of {:?} reached",
max_accounts.unwrap_or_default()
);
break;
}
if executor.num_outstanding() >= batch_size {
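The halt condition added in this hunk is easy to read in isolation; below is a minimal sketch of the same pattern (a hypothetical standalone form, with the bench machinery stubbed out), where `max_accounts: Option<usize>` mirrors the new parameter:

```rust
// Sketch of the halt-at-max-accounts check; None means "no cap".
fn main() {
    let max_accounts: Option<usize> = Some(90);
    let num_instructions = 2; // accounts created per transaction
    let mut total_accounts_created = 0usize;

    loop {
        // ... send a transaction that creates `num_instructions` accounts ...
        total_accounts_created += num_instructions;

        // Some(n) halts once n accounts have been created in total;
        // closed accounts are not subtracted from the tally.
        let max_accounts_met =
            max_accounts.map_or(false, |max| total_accounts_created >= max);
        if max_accounts_met {
            println!(
                "Max account limit of {:?} reached",
                max_accounts.unwrap_or_default()
            );
            break;
        }
    }
}
```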
@@ -873,16 +890,23 @@ fn main() {
Arg::with_name("num_instructions")
.long("num-instructions")
.takes_value(true)
.value_name("NUM")
.value_name("NUM_INSTRUCTIONS")
.help("Number of accounts to create on each transaction"),
)
.arg(
Arg::with_name("iterations")
.long("iterations")
.takes_value(true)
.value_name("NUM")
.value_name("NUM_ITERATIONS")
.help("Number of iterations to make. 0 = unlimited iterations."),
)
.arg(
Arg::with_name("max_accounts")
.long("max-accounts")
.takes_value(true)
.value_name("NUM_ACCOUNTS")
.help("Halt after client has created this number of accounts. Does not count closed accounts."),
)
.arg(
Arg::with_name("check_gossip")
.long("check-gossip")
@@ -892,6 +916,7 @@
Arg::with_name("mint")
.long("mint")
.takes_value(true)
.value_name("MINT_ADDRESS")
.help("Mint address to initialize account"),
)
.arg(
@@ -904,12 +929,14 @@
Arg::with_name("num_rpc_bench_threads")
.long("num-rpc-bench-threads")
.takes_value(true)
.value_name("NUM_THREADS")
.help("Spawn this many RPC benching threads for each type passed by --rpc-bench"),
)
.arg(
Arg::with_name("rpc_bench")
.long("rpc-bench")
.takes_value(true)
.value_name("RPC_BENCH_TYPE(S)")
.multiple(true)
.help("Spawn a thread which calls a specific RPC method in a loop to benchmark it"),
)
@@ -922,6 +949,7 @@
let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4);
let close_nth_batch = value_t!(matches, "close_nth_batch", u64).unwrap_or(0);
let iterations = value_t!(matches, "iterations", usize).unwrap_or(10);
let max_accounts = value_t!(matches, "max_accounts", usize).ok();
let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1);
if num_instructions == 0 || num_instructions > 500 {
eprintln!("bad num_instructions: {num_instructions}");
@@ -1015,6 +1043,7 @@ fn main() {
close_nth_batch,
lamports,
num_instructions,
max_accounts,
mint,
matches.is_present("reclaim_accounts"),
rpc_benches,
@@ -1091,6 +1120,58 @@ pub mod test {
close_nth_batch,
maybe_lamports,
num_instructions,
None,
mint,
reclaim_accounts,
Some(vec![RpcBench::ProgramAccounts]),
1,
);
let post_txs = client.get_transaction_count().unwrap();
start.stop();
info!("{} pre {} post {}", start, pre_txs, post_txs);
}

#[test]
fn test_halt_accounts_creation_at_max() {
solana_logger::setup();
let mut validator_config = ValidatorConfig::default_for_test();
let num_nodes = 1;
add_secondary_indexes(&mut validator_config.account_indexes);
add_secondary_indexes(&mut validator_config.rpc_config.account_indexes);
let mut config = ClusterConfig {
cluster_lamports: 10_000_000,
poh_config: PohConfig::new_sleep(Duration::from_millis(50)),
node_stakes: vec![100; num_nodes],
validator_configs: make_identical_validator_configs(&validator_config, num_nodes),
..ClusterConfig::default()
};

let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let iterations = 100;
let maybe_space = None;
let batch_size = 20;
let close_nth_batch = 0;
let maybe_lamports = None;
let num_instructions = 2;
let mut start = Measure::start("total accounts run");
let rpc_addr = cluster.entry_point_info.rpc().unwrap();
let client = Arc::new(RpcClient::new_socket_with_commitment(
rpc_addr,
CommitmentConfig::confirmed(),
));
let mint = None;
let reclaim_accounts = false;
let pre_txs = client.get_transaction_count().unwrap();
run_accounts_bench(
client.clone(),
&[&cluster.funding_keypair],
iterations,
maybe_space,
batch_size,
close_nth_batch,
maybe_lamports,
num_instructions,
Some(90),
mint,
reclaim_accounts,
Some(vec![RpcBench::ProgramAccounts]),
@@ -1190,6 +1271,7 @@ pub mod test {
close_nth_batch,
Some(minimum_balance),
num_instructions,
None,
Some(spl_mint_keypair.pubkey()),
true,
None,
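Note how the new flag is parsed into an `Option` rather than a default: `value_t!(matches, "max_accounts", usize).ok()` yields `None` when `--max-accounts` is absent. A minimal sketch of that parsing pattern, assuming clap 2.x (the `Arg::with_name` / `value_t!` style this file uses) and a hypothetical standalone binary:

```rust
use clap::{value_t, App, Arg};

fn main() {
    let matches = App::new("demo")
        .arg(
            Arg::with_name("max_accounts")
                .long("max-accounts")
                .takes_value(true)
                .value_name("NUM_ACCOUNTS"),
        )
        .get_matches();

    // .ok() maps "flag absent" to None instead of substituting a default,
    // which is what lets run_accounts_bench treat None as "unlimited".
    let max_accounts = value_t!(matches, "max_accounts", usize).ok();
    println!("max_accounts = {max_accounts:?}");
}
```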
42 changes: 35 additions & 7 deletions accounts-db/src/accounts_db.rs
@@ -58,7 +58,9 @@ use {
append_vec::{
aligned_stored_size, AppendVec, APPEND_VEC_MMAPPED_FILES_OPEN, STORE_META_OVERHEAD,
},
cache_hash_data::{CacheHashData, CacheHashDataFileReference},
cache_hash_data::{
CacheHashData, CacheHashDataFileReference, DeletionPolicy as CacheHashDeletionPolicy,
},
contains::Contains,
epoch_accounts_hash::EpochAccountsHashManager,
in_mem_accounts_index::StartupStats,
@@ -7149,6 +7151,29 @@ impl AccountsDb {
})
.collect::<Vec<_>>();

// Calculate the hits and misses of the hash data files cache.
// This is outside of the parallel loop above so that we only need to
// update each atomic stat value once.
// There are approximately 173 items in the cache files list,
// so it should be very fast to iterate and compute.
// (173 cache files == 432,000 slots / 2,500 slots-per-cache-file)
let mut hits = 0;
let mut misses = 0;
for cache_file in &cache_files {
match cache_file {
ScanAccountStorageResult::CacheFileAlreadyExists(_) => hits += 1,
ScanAccountStorageResult::CacheFileNeedsToBeCreated(_) => misses += 1,
};
}
cache_hash_data
.stats
.hits
.fetch_add(hits, Ordering::Relaxed);
cache_hash_data
.stats
.misses
.fetch_add(misses, Ordering::Relaxed);

// deletes the old files that will not be used before creating new ones
cache_hash_data.delete_old_cache_files();
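The comment above describes batching the stat updates; a self-contained sketch of that pattern (with a hypothetical `ScanResult` enum standing in for `ScanAccountStorageResult`) shows why only two atomic operations are needed regardless of how many cache files there are:

```rust
// Tally hits and misses in plain locals, then update each atomic
// counter exactly once.
use std::sync::atomic::{AtomicUsize, Ordering};

enum ScanResult {
    CacheFileAlreadyExists,
    CacheFileNeedsToBeCreated,
}

fn main() {
    let stats_hits = AtomicUsize::new(0);
    let stats_misses = AtomicUsize::new(0);
    let cache_files = vec![
        ScanResult::CacheFileAlreadyExists,
        ScanResult::CacheFileNeedsToBeCreated,
        ScanResult::CacheFileAlreadyExists,
    ];

    let (mut hits, mut misses) = (0, 0);
    for cache_file in &cache_files {
        match cache_file {
            ScanResult::CacheFileAlreadyExists => hits += 1,
            ScanResult::CacheFileNeedsToBeCreated => misses += 1,
        }
    }
    // One atomic read-modify-write per counter, instead of one per file.
    stats_hits.fetch_add(hits, Ordering::Relaxed);
    stats_misses.fetch_add(misses, Ordering::Relaxed);

    assert_eq!(stats_hits.load(Ordering::Relaxed), 2);
    assert_eq!(stats_misses.load(Ordering::Relaxed), 1);
}
```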

@@ -7549,10 +7574,13 @@ impl AccountsDb {
_ = std::fs::remove_dir_all(&failed_dir);
failed_dir
};
CacheHashData::new(
accounts_hash_cache_path,
(kind == CalcAccountsHashKind::Incremental).then_some(storages_start_slot),
)
let deletion_policy = match kind {
CalcAccountsHashKind::Full => CacheHashDeletionPolicy::AllUnused,
CalcAccountsHashKind::Incremental => {
CacheHashDeletionPolicy::UnusedAtLeast(storages_start_slot)
}
};
CacheHashData::new(accounts_hash_cache_path, deletion_policy)
}

// modeled after calculate_accounts_delta_hash
@@ -9775,7 +9803,7 @@ pub mod tests {
let temp_dir = TempDir::new().unwrap();
let accounts_hash_cache_path = temp_dir.path().to_path_buf();
self.scan_snapshot_stores_with_cache(
&CacheHashData::new(accounts_hash_cache_path, None),
&CacheHashData::new(accounts_hash_cache_path, CacheHashDeletionPolicy::AllUnused),
storage,
stats,
bins,
@@ -10843,7 +10871,7 @@ pub mod tests {
};

let result = accounts_db.scan_account_storage_no_bank(
&CacheHashData::new(accounts_hash_cache_path, None),
&CacheHashData::new(accounts_hash_cache_path, CacheHashDeletionPolicy::AllUnused),
&CalcAccountsHashConfig::default(),
&get_storage_refs(&[storage]),
test_scan,
56 changes: 36 additions & 20 deletions accounts-db/src/cache_hash_data.rs
@@ -193,8 +193,7 @@ impl CacheHashDataFile {
pub(crate) struct CacheHashData {
cache_dir: PathBuf,
pre_existing_cache_files: Arc<Mutex<HashSet<PathBuf>>>,
/// Decides which old cache files to delete. See `delete_old_cache_files()` for more info.
storages_start_slot: Option<Slot>,
deletion_policy: DeletionPolicy,
pub stats: Arc<CacheHashDataStats>,
}

@@ -206,15 +205,15 @@ impl Drop for CacheHashData {
}

impl CacheHashData {
pub(crate) fn new(cache_dir: PathBuf, storages_start_slot: Option<Slot>) -> CacheHashData {
pub(crate) fn new(cache_dir: PathBuf, deletion_policy: DeletionPolicy) -> CacheHashData {
std::fs::create_dir_all(&cache_dir).unwrap_or_else(|err| {
panic!("error creating cache dir {}: {err}", cache_dir.display())
});

let result = CacheHashData {
cache_dir,
pre_existing_cache_files: Arc::new(Mutex::new(HashSet::default())),
storages_start_slot,
deletion_policy,
stats: Arc::default(),
};

@@ -229,21 +228,24 @@ impl CacheHashData {
let mut old_cache_files =
std::mem::take(&mut *self.pre_existing_cache_files.lock().unwrap());

// If `storages_start_slot` is None, we're doing a full accounts hash calculation, and thus
// all unused cache files can be deleted.
// If `storages_start_slot` is Some, we're doing an incremental accounts hash calculation,
// and we only want to delete the unused cache files *that IAH considered*.
if let Some(storages_start_slot) = self.storages_start_slot {
old_cache_files.retain(|old_cache_file| {
let Some(parsed_filename) = parse_filename(old_cache_file) else {
// if parsing the cache filename fails, we *do* want to delete it
return true;
};

// if the old cache file is in the incremental accounts hash calculation range,
// then delete it
parsed_filename.slot_range_start >= storages_start_slot
});
match self.deletion_policy {
DeletionPolicy::AllUnused => {
// no additional work to do here; we will delete everything in `old_cache_files`
}
DeletionPolicy::UnusedAtLeast(storages_start_slot) => {
// when calculating an incremental accounts hash, we only want to delete the unused
// cache files *that IAH considered*
old_cache_files.retain(|old_cache_file| {
let Some(parsed_filename) = parse_filename(old_cache_file) else {
// if parsing the cache filename fails, we *do* want to delete it
return true;
};

// if the old cache file is in the incremental accounts hash calculation range,
// then delete it
parsed_filename.slot_range_start >= storages_start_slot
});
}
}

if !old_cache_files.is_empty() {
@@ -410,6 +412,19 @@ fn parse_filename(cache_filename: impl AsRef<Path>) -> Option<ParsedFilename> {
})
}

/// Decides which old cache files to delete
///
/// See `delete_old_cache_files()` for more info.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum DeletionPolicy {
/// Delete *all* the unused cache files
/// Should be used when calculating full accounts hash
AllUnused,
/// Delete *only* the unused cache files with starting slot range *at least* this slot
/// Should be used when calculating incremental accounts hash
UnusedAtLeast(Slot),
}

#[cfg(test)]
mod tests {
use {super::*, crate::accounts_hash::AccountHash, rand::Rng};
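To make the retention rule concrete, here is a self-contained sketch (with a hypothetical `should_delete` helper and plain `u64` standing in for `Slot`) of which unused cache files each policy deletes:

```rust
// slot_range_start is None when the cache filename fails to parse.
#[derive(Debug, Copy, Clone)]
enum DeletionPolicy {
    AllUnused,
    UnusedAtLeast(u64),
}

fn should_delete(policy: DeletionPolicy, slot_range_start: Option<u64>) -> bool {
    match policy {
        // Full accounts hash: every unused cache file is deleted.
        DeletionPolicy::AllUnused => true,
        DeletionPolicy::UnusedAtLeast(start) => match slot_range_start {
            // Unparseable filenames are always deleted.
            None => true,
            // Only files whose slot range starts inside the incremental
            // accounts hash calculation range are deleted.
            Some(s) => s >= start,
        },
    }
}

fn main() {
    assert!(should_delete(DeletionPolicy::AllUnused, Some(0)));
    assert!(should_delete(DeletionPolicy::UnusedAtLeast(100), None));
    assert!(!should_delete(DeletionPolicy::UnusedAtLeast(100), Some(50)));
    assert!(should_delete(DeletionPolicy::UnusedAtLeast(100), Some(150)));
}
```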
@@ -477,7 +492,8 @@ mod tests {
data_this_pass.push(this_bin_data);
}
}
let cache = CacheHashData::new(cache_dir.clone(), None);
let cache =
CacheHashData::new(cache_dir.clone(), DeletionPolicy::AllUnused);
let file_name = PathBuf::from("test");
cache.save(&file_name, &data_this_pass).unwrap();
cache.get_cache_files();
