diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 5ed223bbbbc9bf..b4a74f83568116 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -8997,8 +8997,10 @@ impl AccountsDb {
         let schedule = &genesis_config.epoch_schedule;
         let rent_collector = RentCollector::new(
             schedule.get_epoch(max_slot),
+            #[allow(clippy::clone_on_copy)]
             schedule.clone(),
             genesis_config.slots_per_year(),
+            #[allow(clippy::clone_on_copy)]
             genesis_config.rent.clone(),
         );
         let accounts_data_len = AtomicU64::new(0);
diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs
index 3e972c813db4db..be0492c545896f 100644
--- a/core/src/repair/ancestor_hashes_service.rs
+++ b/core/src/repair/ancestor_hashes_service.rs
@@ -1349,6 +1349,7 @@ mod test {
         fn new(bank_forks: Arc<RwLock<BankForks>>) -> Self {
             let ancestor_hashes_request_statuses = Arc::new(DashMap::new());
             let ancestor_hashes_request_socket = Arc::new(UdpSocket::bind("0.0.0.0:0").unwrap());
+            #[allow(clippy::clone_on_copy)]
             let epoch_schedule = bank_forks
                 .read()
                 .unwrap()
diff --git a/core/src/repair/repair_weight.rs b/core/src/repair/repair_weight.rs
index 7e65cfaa232658..6872c8065446ad 100644
--- a/core/src/repair/repair_weight.rs
+++ b/core/src/repair/repair_weight.rs
@@ -2553,6 +2553,7 @@ mod test {
         let stake = 100;
         let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys_for_tests(10, stake);
         let mut epoch_stakes = bank.epoch_stakes_map().clone();
+        #[allow(clippy::clone_on_copy)]
         let mut epoch_schedule = bank.epoch_schedule().clone();
 
         // Simulate epoch boundary at slot 10, where half of the stake deactivates
diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs
index 0c7dccf24a3537..e6ad1ca9eb5022 100644
--- a/core/src/shred_fetch_stage.rs
+++ b/core/src/shred_fetch_stage.rs
@@ -36,6 +36,7 @@ pub(crate) struct ShredFetchStage {
 
 impl ShredFetchStage {
     // updates packets received on a channel and sends them on another channel
+    #[allow(clippy::clone_on_copy)]
     fn modify_packets(
         recvr: PacketBatchReceiver,
         sendr: Sender<PacketBatch>,
diff --git a/core/src/tvu.rs b/core/src/tvu.rs
index feaf0a9834d17c..b5a1ad1dcd2df7 100644
--- a/core/src/tvu.rs
+++ b/core/src/tvu.rs
@@ -200,6 +200,7 @@ impl Tvu {
         let (dumped_slots_sender, dumped_slots_receiver) = unbounded();
         let (popular_pruned_forks_sender, popular_pruned_forks_receiver) = unbounded();
         let window_service = {
+            #[allow(clippy::clone_on_copy)]
             let epoch_schedule = bank_forks
                 .read()
                 .unwrap()
diff --git a/genesis/src/stakes.rs b/genesis/src/stakes.rs
index 133fdf57f4968b..b1f545b218fa6f 100644
--- a/genesis/src/stakes.rs
+++ b/genesis/src/stakes.rs
@@ -246,6 +246,7 @@ mod tests {
         let total_lamports = staker_reserve + reserve * 2 + 1;
         create_and_check_stakes(
             &mut GenesisConfig {
+                #[allow(clippy::clone_on_copy)]
                 rent: rent.clone(),
                 ..GenesisConfig::default()
             },
@@ -272,6 +273,7 @@ mod tests {
         let total_lamports = staker_reserve + reserve * 2 + 1;
         create_and_check_stakes(
             &mut GenesisConfig {
+                #[allow(clippy::clone_on_copy)]
                 rent: rent.clone(),
                 ..GenesisConfig::default()
             },
@@ -298,6 +300,7 @@ mod tests {
         let total_lamports = staker_reserve + (granularity + reserve) * 2;
         create_and_check_stakes(
             &mut GenesisConfig {
+                #[allow(clippy::clone_on_copy)]
                 rent: rent.clone(),
                 ..GenesisConfig::default()
             },
@@ -323,6 +326,7 @@ mod tests {
         let total_lamports = staker_reserve + (granularity + reserve + 1) * 2;
         create_and_check_stakes(
             &mut GenesisConfig {
+                #[allow(clippy::clone_on_copy)]
                 rent: rent.clone(),
                 ..GenesisConfig::default()
             },
diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs
index 4fa5fa6f3aa808..4e093814b9bb3a 100644
--- a/ledger/src/blockstore_processor.rs
+++ b/ledger/src/blockstore_processor.rs
@@ -3887,6 +3887,7 @@ pub mod tests {
             AccountSecondaryIndexes::default(),
             AccountShrinkThreshold::default(),
         );
+        #[allow(clippy::clone_on_copy)]
         bank.epoch_schedule().clone()
     }
 
diff --git a/ledger/src/leader_schedule_cache.rs b/ledger/src/leader_schedule_cache.rs
index f847f6ce2871fe..62328ad71f0bdd 100644
--- a/ledger/src/leader_schedule_cache.rs
+++ b/ledger/src/leader_schedule_cache.rs
@@ -40,6 +40,7 @@ pub struct LeaderScheduleCache {
 
 impl LeaderScheduleCache {
     pub fn new_from_bank(bank: &Bank) -> Self {
+        #[allow(clippy::clone_on_copy)]
         Self::new(bank.epoch_schedule().clone(), bank)
     }
 
diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs
index 6d500528d5e236..fb3d5b32b7ca4f 100644
--- a/programs/bpf_loader/src/syscalls/mod.rs
+++ b/programs/bpf_loader/src/syscalls/mod.rs
@@ -3362,8 +3362,10 @@ mod tests {
 
         let mut sysvar_cache = SysvarCache::default();
         sysvar_cache.set_clock(src_clock.clone());
+        #[allow(clippy::clone_on_copy)]
         sysvar_cache.set_epoch_schedule(src_epochschedule.clone());
         sysvar_cache.set_fees(src_fees.clone());
+        #[allow(clippy::clone_on_copy)]
         sysvar_cache.set_rent(src_rent.clone());
         sysvar_cache.set_epoch_rewards(src_rewards);
 
diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs
index f5901374d9b6d9..27af32efd286f0 100644
--- a/programs/vote/src/vote_state/mod.rs
+++ b/programs/vote/src/vote_state/mod.rs
@@ -1298,6 +1298,7 @@ mod tests {
         let processor_account = AccountSharedData::new(0, 0, &solana_sdk::native_loader::id());
         let transaction_context = TransactionContext::new(
             vec![(id(), processor_account), (node_pubkey, vote_account)],
+            #[allow(clippy::clone_on_copy)]
             rent.clone(),
             0,
             0,
diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 9b892ade4d7766..60458ff3add646 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -636,6 +636,7 @@ impl JsonRpcRequestProcessor {
         // Since epoch schedule data comes from the genesis config, any commitment level should be
         // fine
         let bank = self.bank(Some(CommitmentConfig::finalized()));
+        #[allow(clippy::clone_on_copy)]
         bank.epoch_schedule().clone()
     }
 
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index fb8a36408a6679..8b217d3b8cf679 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -1212,6 +1212,7 @@ impl Bank {
         parent.freeze();
         assert_ne!(slot, parent.slot());
 
+        #[allow(clippy::clone_on_copy)]
         let epoch_schedule = parent.epoch_schedule().clone();
         let epoch = epoch_schedule.get_epoch(slot);
 
@@ -1920,6 +1921,7 @@ impl Bank {
             fee_rate_governor: self.fee_rate_governor.clone(),
             collected_rent: self.collected_rent.load(Relaxed),
             rent_collector: self.rent_collector.clone(),
+            #[allow(clippy::clone_on_copy)]
             epoch_schedule: self.epoch_schedule.clone(),
             inflation: *self.inflation.read().unwrap(),
             stakes: &self.stakes_cache,
@@ -3748,6 +3750,7 @@ impl Bank {
         self.parent_hash
     }
 
+    #[allow(clippy::clone_on_copy)]
     fn process_genesis_config(
         &mut self,
         genesis_config: &GenesisConfig,
@@ -4781,6 +4784,7 @@ impl Bank {
 
         let mut transaction_context = TransactionContext::new(
             transaction_accounts,
+            #[allow(clippy::clone_on_copy)]
             self.rent_collector.rent.clone(),
             compute_budget.max_invoke_stack_height,
             compute_budget.max_instruction_trace_length,
@@ -7076,6 +7080,7 @@ impl Bank {
         if config.run_in_background {
             let ancestors = ancestors.clone();
             let accounts = Arc::clone(accounts);
+            #[allow(clippy::clone_on_copy)]
             let epoch_schedule = epoch_schedule.clone();
             let rent_collector = rent_collector.clone();
             let accounts_ = Arc::clone(&accounts);
diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs
index f5623c550a24bf..61c7bfd3fda850 100644
--- a/runtime/src/snapshot_package.rs
+++ b/runtime/src/snapshot_package.rs
@@ -148,6 +148,7 @@ impl AccountsPackage {
             expected_capitalization: bank.capitalization(),
             accounts_hash_for_testing,
             accounts: bank.accounts(),
+            #[allow(clippy::clone_on_copy)]
             epoch_schedule: bank.epoch_schedule().clone(),
             rent_collector: bank.rent_collector().clone(),
             is_incremental_accounts_hash_feature_enabled,
diff --git a/sdk/macro/src/lib.rs b/sdk/macro/src/lib.rs
index 157592dc37bcaa..f72dcdfcf8eb2f 100644
--- a/sdk/macro/src/lib.rs
+++ b/sdk/macro/src/lib.rs
@@ -430,6 +430,7 @@ pub fn derive_clone_zeroed(input: proc_macro::TokenStream) -> proc_macro::TokenS
             // implementations on `Copy` types are simply wrappers of `Copy`.
             // This is not the case here, and intentionally so because we want to
             // guarantee zeroed padding.
+            #[allow(clippy::incorrect_clone_impl_on_copy_type)]
             fn clone(&self) -> Self {
                 let mut value = std::mem::MaybeUninit::<Self>::uninit();
                 unsafe {
diff --git a/sdk/program/src/epoch_schedule.rs b/sdk/program/src/epoch_schedule.rs
index cd3fa59c6d9cd3..9d046cb3dbaad9 100644
--- a/sdk/program/src/epoch_schedule.rs
+++ b/sdk/program/src/epoch_schedule.rs
@@ -29,7 +29,7 @@ pub const MAX_LEADER_SCHEDULE_EPOCH_OFFSET: u64 = 3;
 pub const MINIMUM_SLOTS_PER_EPOCH: u64 = 32;
 
 #[repr(C)]
-#[derive(Debug, CloneZeroed, PartialEq, Eq, Deserialize, Serialize, AbiExample)]
+#[derive(Debug, CloneZeroed, Copy, PartialEq, Eq, Deserialize, Serialize, AbiExample)]
 #[serde(rename_all = "camelCase")]
 pub struct EpochSchedule {
     /// The maximum number of slots in each epoch.
diff --git a/sdk/program/src/rent.rs b/sdk/program/src/rent.rs
index 6d670542e2b6b5..5d173bb77ad14b 100644
--- a/sdk/program/src/rent.rs
+++ b/sdk/program/src/rent.rs
@@ -8,7 +8,7 @@ use {crate::clock::DEFAULT_SLOTS_PER_EPOCH, solana_sdk_macro::CloneZeroed};
 
 /// Configuration of network rent.
 #[repr(C)]
-#[derive(Serialize, Deserialize, PartialEq, CloneZeroed, Debug, AbiExample)]
+#[derive(Serialize, Deserialize, PartialEq, CloneZeroed, Copy, Debug, AbiExample)]
 pub struct Rent {
     /// Rental rate in lamports/byte-year.
     pub lamports_per_byte_year: u64,
diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs
index e0e4c17e866932..dddde5d7896b4e 100644
--- a/test-validator/src/lib.rs
+++ b/test-validator/src/lib.rs
@@ -843,6 +843,7 @@ impl TestValidator {
             validator_stake_lamports,
             validator_identity_lamports,
             config.fee_rate_governor.clone(),
+            #[allow(clippy::clone_on_copy)]
             config.rent.clone(),
             solana_sdk::genesis_config::ClusterType::Development,
             accounts.into_iter().collect(),