Fix typos (#3579)
omahs authored Nov 12, 2024
1 parent c54dd21 commit b76604f
Showing 15 changed files with 33 additions and 33 deletions.
6 changes: 3 additions & 3 deletions accounts-db/src/tiered_storage/hot.rs
@@ -39,7 +39,7 @@ pub const HOT_FORMAT: TieredStorageFormat = TieredStorageFormat {
account_block_format: AccountBlockFormat::AlignedRaw,
};

-/// An helper function that creates a new default footer for hot
+/// A helper function that creates a new default footer for hot
/// accounts storage.
fn new_hot_footer() -> TieredStorageFooter {
TieredStorageFooter {
@@ -380,7 +380,7 @@ impl HotStorageReader {
self.mmap.len()
}

-/// Returns whether the nderlying storage is empty.
+/// Returns whether the underlying storage is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
@@ -853,7 +853,7 @@ mod tests {
datas: Vec<Vec<u8>>,
/// path to the hot storage file that was written
file_path: PathBuf,
-/// temp directory where the the hot storage file was written
+/// temp directory where the hot storage file was written
temp_dir: TempDir,
}

4 changes: 2 additions & 2 deletions bench-tps/src/log_transaction_service.rs
@@ -226,7 +226,7 @@ impl LogTransactionService {
tx_log_writer,
block_log_writer,
);
-// if last_time is some, it means that the there is at least one valid block
+// if last_time is some, it means that there is at least one valid block
if block_time.is_some() {
last_block_time = block_time;
}
@@ -317,7 +317,7 @@ impl LogTransactionService {
}

/// Remove from map all the signatures which we haven't processed before and they are
-/// older than the the timestamp of the last processed block plus max blockhash age.
+/// older than the timestamp of the last processed block plus max blockhash age.
fn clean_transaction_map(
tx_log_writer: &mut TransactionLogWriter,
signature_to_tx_info: &mut MapSignatureToTxInfo,
6 changes: 3 additions & 3 deletions core/src/consensus/fork_choice.rs
@@ -95,7 +95,7 @@ fn last_vote_able_to_land(
my_latest_landed_vote_slot >= last_voted_slot
// 2. Already voting at the tip
|| last_voted_slot >= heaviest_bank_on_same_voted_fork.slot()
-// 3. Last vote is withink slot hashes, regular refresh is enough
+// 3. Last vote is within slot hashes, regular refresh is enough
|| heaviest_bank_on_same_voted_fork
.is_in_slot_hashes_history(&last_voted_slot)
}
@@ -123,7 +123,7 @@ fn recheck_fork_decision_failed_switch_threshold(
return SwitchForkDecision::SameFork;
}

-// If we can't switch, then reset to the the next votable bank on the same
+// If we can't switch, then reset to the next votable bank on the same
// fork as our last vote, but don't vote.

// We don't just reset to the heaviest fork when switch threshold fails because
@@ -409,7 +409,7 @@ fn can_vote_on_candidate_bank(
/// longer being valid to vote on, it's possible that a validator will not
/// be able to reset away from the invalid fork that they last voted on. To
/// resolve this scenario, validators need to wait until they can create a
-/// switch proof for another fork or until the invalid fork is be marked
+/// switch proof for another fork or until the invalid fork is marked
/// valid again if it was confirmed by the cluster.
/// Until this is resolved, leaders will build each of their
/// blocks from the last reset bank on the invalid fork.
6 changes: 3 additions & 3 deletions core/src/consensus/heaviest_subtree_fork_choice.rs
@@ -1171,7 +1171,7 @@ impl HeaviestSubtreeForkChoice {
//
// In this scenario only 60% of the network has voted before the duplicate proof for Slot 1 and 1'
// was viewed. Neither version of the slot will reach the duplicate confirmed threshold, so it is
-// critical that a new fork Slot 2 from Slot 0 is created to allow the the validators on Slot 1 and
+// critical that a new fork Slot 2 from Slot 0 is created to allow the validators on Slot 1 and
// Slot 1' to switch. Since the `best_slot` is an ancestor of the last vote (Slot 0 is ancestor of last
// vote Slot 1 or Slot 1'), we will trigger `SwitchForkDecision::FailedSwitchDuplicateRollback`, which
// will create an alternate fork off of Slot 0. Once this alternate fork is created, the `best_slot`
@@ -3519,7 +3519,7 @@ mod test {
(vote_pubkeys[1], duplicate_leaves_descended_from_5[0]),
];

-// The best slot should be the the smallest leaf descended from 4
+// The best slot should be the smallest leaf descended from 4
assert_eq!(
heaviest_subtree_fork_choice.add_votes(
pubkey_votes.iter(),
@@ -3565,7 +3565,7 @@ mod test {
..,
) = setup_mark_invalid_forks_duplicate_tests();

-// Marking candidate as valid again will choose the the heaviest leaf of
+// Marking candidate as valid again will choose the heaviest leaf of
// the newly valid branch
let duplicate_slot = duplicate_leaves_descended_from_4[0].0;
let duplicate_descendant = (duplicate_slot + 1, Hash::new_unique());
6 changes: 3 additions & 3 deletions curves/secp256k1-recover/src/lib.rs
@@ -137,7 +137,7 @@ solana_define_syscall::define_syscall!(fn sol_secp256k1_recover(hash: *const u8,
/// # Hashing messages
///
/// In ECDSA signing and key recovery the signed "message" is always a
-/// crytographic hash, not the original message itself. If not a cryptographic
+/// cryptographic hash, not the original message itself. If not a cryptographic
/// hash, then an adversary can craft signatures that recover to arbitrary
/// public keys. This means the caller of this function generally must hash the
/// original message themselves and not rely on another party to provide the
@@ -228,7 +228,7 @@ solana_define_syscall::define_syscall!(fn sol_secp256k1_recover(hash: *const u8,
/// lengths of `hash` and `signature` beforehand.
///
/// When run on-chain this function will not directly validate the lengths of
-/// `hash` and `signature`. It will assume they are the the correct lengths and
+/// `hash` and `signature`. It will assume they are the correct lengths and
/// pass their pointers to the runtime, which will interpret them as 32-byte and
/// 64-byte buffers. If the provided slices are too short, the runtime will read
/// invalid data and attempt to interpret it, most likely returning an error,
@@ -239,7 +239,7 @@ solana_define_syscall::define_syscall!(fn sol_secp256k1_recover(hash: *const u8,
///
/// # Examples
///
-/// This example demonstrates recovering a public key and using it to very a
+/// This example demonstrates recovering a public key and using it to verify a
/// signature with the `secp256k1_recover` syscall. It has three parts: a Solana
/// program, an RPC client to call the program, and common definitions shared
/// between the two.
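
The doc comments in this hunk stress two points: the value passed to recovery must itself be a cryptographic hash, and the recovered key must be compared against a known public key. A minimal off-chain sketch of that flow, assuming the published `solana_program` APIs (`keccak::hash` and `secp256k1_recover`) rather than anything shown in this diff; the function and parameter names are illustrative:

```rust
use solana_program::{keccak, secp256k1_recover::secp256k1_recover};

/// Recover the signer's key from a keccak-hashed message and compare it to an
/// expected 64-byte uncompressed secp256k1 public key (illustrative helper).
fn signature_matches(
    message: &[u8],
    signature: &[u8; 64],
    recovery_id: u8,
    expected_pubkey: &[u8; 64],
) -> bool {
    // Hash first: recovering over an unhashed message would let an adversary
    // craft signatures that recover to arbitrary public keys.
    let message_hash = keccak::hash(message);
    match secp256k1_recover(message_hash.as_ref(), recovery_id, signature) {
        Ok(recovered) => recovered.to_bytes() == *expected_pubkey,
        Err(_) => false,
    }
}
```

On-chain the same call goes through the syscall, with the caveat noted in the hunk above that slice lengths are not validated before being handed to the runtime.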
2 changes: 1 addition & 1 deletion docs/src/backwards-compatibility.md
@@ -21,7 +21,7 @@ communication.
### Deprecation Process

1. In any `PATCH` or `MINOR` release, a feature, API, endpoint, etc. could be marked as deprecated.
-2. According to code upgrade difficulty, some features will be remain deprecated for a few release
+2. According to code upgrade difficulty, some features will remain deprecated for a few release
cycles.
3. In a future `MAJOR` release, deprecated features will be removed in an incompatible way.

2 changes: 1 addition & 1 deletion docs/src/validator/tpu.md
@@ -25,7 +25,7 @@ second(PPS) and applied the limit to the connection based on the stake.
Higher stakes offers better bandwidth. If the transfer rate is exceeded,
the server can drop the stream with the error code (15 -- STREAM_STOP_CODE_THROTTLING).
The client is expected to do some sort of exponential back off in retrying the
-transactionswhen running into this situation.
+transactions when running into this situation.

* sigverify stage: deduplicates packets and applies some load-shedding
to remove excessive packets before then filtering packets with invalid
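
The tpu.md passage in this hunk says a client that hits STREAM_STOP_CODE_THROTTLING is expected to retry with exponential back-off. A generic sketch of such a retry loop, not the actual client API; the closure, error handling, and delay constants are placeholder choices:

```rust
use std::{thread, time::Duration};

/// Retry `send` with exponential back-off, doubling the delay after each
/// failure up to a one-second cap (all constants are illustrative).
fn send_with_backoff<E>(
    mut send: impl FnMut() -> Result<(), E>,
    max_attempts: u32,
) -> Result<(), E> {
    let mut delay = Duration::from_millis(50);
    let mut last_err = None;
    for _ in 0..max_attempts {
        match send() {
            Ok(()) => return Ok(()),
            Err(err) => {
                last_err = Some(err);
                thread::sleep(delay);
                delay = (delay * 2).min(Duration::from_secs(1));
            }
        }
    }
    // Caller is expected to pass max_attempts >= 1.
    Err(last_err.expect("max_attempts must be at least 1"))
}
```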
4 changes: 2 additions & 2 deletions dos/src/main.rs
@@ -306,7 +306,7 @@ fn create_sender_thread(
stats_count += len;
total_count += len;
if iterations != 0 && total_count >= iterations {
info!("All transactions has been sent");
info!("All transactions have been sent");
// dropping receiver to signal generator threads to stop
drop(tx_receiver);
break;
@@ -360,7 +360,7 @@ fn create_generator_thread<T: 'static + TpsClient + Send + Sync>(
// and hence this choice of n provides large enough number of permutations
let mut keypairs_flat: Vec<Keypair> = Vec::new();
// 1000 is arbitrary number. In case of permutation_size > 1,
-// this guaranties large enough set of unique permutations
+// this guarantees large enough set of unique permutations
let permutation_size = get_permutation_size(
transaction_params.num_signatures.as_ref(),
transaction_params.num_instructions.as_ref(),
2 changes: 1 addition & 1 deletion local-cluster/tests/local_cluster.rs
@@ -5328,7 +5328,7 @@ fn test_boot_from_local_state_missing_archive() {
// 0
// \--- 2
//
-// 1. > DUPLICATE_THRESHOLD of the nodes vote on some version of the the duplicate block 3,
+// 1. > DUPLICATE_THRESHOLD of the nodes vote on some version of the duplicate block 3,
// but don't immediately duplicate confirm so they remove 3 from fork choice and reset PoH back to 1.
// 2. All the votes on 3 don't land because there are no further blocks building off 3.
// 3. Some < SWITCHING_THRESHOLD of nodes vote on 2, making it the heaviest fork because no votes on 3 landed
4 changes: 2 additions & 2 deletions sdk/file-download/src/lib.rs
@@ -31,7 +31,7 @@ fn new_spinner_progress_bar() -> ProgressBar {
pub struct DownloadProgressRecord {
// Duration since the beginning of the download
pub elapsed_time: Duration,
-// Duration since the the last notification
+// Duration since the last notification
pub last_elapsed_time: Duration,
// the bytes/sec speed measured for the last notification period
pub last_throughput: f32,
@@ -43,7 +43,7 @@ pub struct DownloadProgressRecord {
pub current_bytes: usize,
// percentage downloaded
pub percentage_done: f32,
-// Estimated remaining time (in seconds) to finish the download if it keeps at the the last download speed
+// Estimated remaining time (in seconds) to finish the download if it keeps at the last download speed
pub estimated_remaining_time: f32,
// The times of the progress is being notified, it starts from 1 and increments by 1 each time
pub notification_count: u64,
2 changes: 1 addition & 1 deletion sdk/src/client.rs
@@ -1,5 +1,5 @@
//! Defines traits for blocking (synchronous) and non-blocking (asynchronous)
-//! communication with a Solana server as well a a trait that encompasses both.
+//! communication with a Solana server as well as a trait that encompasses both.
//!
//! //! Synchronous implementations are expected to create transactions, sign them, and send
//! them with multiple retries, updating blockhashes and resigning as-needed.
14 changes: 7 additions & 7 deletions sdk/src/secp256k1_instruction.rs
@@ -6,7 +6,7 @@
//! used carefully to ensure proper security. Read this documentation and
//! accompanying links thoroughly._
//!
-//! The secp26k1 native program peforms flexible verification of [secp256k1]
+//! The secp26k1 native program performs flexible verification of [secp256k1]
//! ECDSA signatures, as used by Ethereum. It can verify up to 255 signatures on
//! up to 255 messages, with those signatures, messages, and their public keys
//! arbitrarily distributed across the instruction data of any instructions in
@@ -41,14 +41,14 @@
//! [EIP-712]: https://eips.ethereum.org/EIPS/eip-712
//!
//! The [`new_secp256k1_instruction`] function is suitable for building a
-//! secp256k1 program instruction for basic use cases were a single message must
+//! secp256k1 program instruction for basic use cases where a single message must
//! be signed by a known secret key. For other uses cases, including many
//! Ethereum-integration use cases, construction of the secp256k1 instruction
//! must be done manually.
//!
//! # How to use this program
//!
-//! Transactions that uses the secp256k1 native program will typically include
+//! Transactions that use the secp256k1 native program will typically include
//! at least two instructions: one for the secp256k1 program to verify the
//! signatures, and one for a custom program that will check that the secp256k1
//! instruction data matches what the program expects (using
@@ -67,7 +67,7 @@
//! - In the client:
//! - Sign the [`keccak`]-hashed messages with a secp256k1 ECDSA library,
//! like the [`libsecp256k1`] crate.
-//! - Build any custom instruction data that contain signature, message, or
+//! - Build any custom instruction data that contains signature, message, or
//! Ethereum address data that will be used by the secp256k1 instruction.
//! - Build the secp256k1 program instruction data, specifying the number of
//! signatures to verify, the instruction indexes within the transaction,
@@ -98,7 +98,7 @@
//! Many steps must be done manually.
//!
//! The `solana_program` crate provides no APIs to assist in interpreting
-//! the the secp256k1 instruction data. It must be done manually.
+//! the secp256k1 instruction data. It must be done manually.
//!
//! The secp256k1 program is implemented with the [`libsecp256k1`] crate,
//! which clients may also want to use.
@@ -464,7 +464,7 @@
//!
//! ## Example: Verifying multiple signatures in one instruction
//!
-//! This examples demonstrates manually creating a secp256k1 instruction
+//! This example demonstrates manually creating a secp256k1 instruction
//! containing many signatures, and a Solana program that parses them all. This
//! example on its own has no practical purpose. It simply demonstrates advanced
//! use of the secp256k1 program.
@@ -911,7 +911,7 @@ pub fn construct_eth_pubkey(

/// Verifies the signatures specified in the secp256k1 instruction data.
///
-/// This is same the verification routine executed by the runtime's secp256k1 native program,
+/// This is the same as the verification routine executed by the runtime's secp256k1 native program,
/// and is primarily of use to the runtime.
///
/// `data` is the secp256k1 program's instruction data. `instruction_datas` is
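
The module docs in this file describe the typical shape of a transaction that uses the secp256k1 native program: one instruction for the native program to verify signatures, plus a custom instruction that checks the verified data via the instructions sysvar. A client-side sketch for the single-known-key case, assuming `solana_sdk`'s published `new_secp256k1_instruction` helper and the `libsecp256k1` crate; the program id, instruction data, and empty account list here are hypothetical:

```rust
use solana_sdk::{
    hash::Hash,
    instruction::Instruction,
    pubkey::Pubkey,
    secp256k1_instruction::new_secp256k1_instruction,
    signature::{Keypair, Signer},
    transaction::Transaction,
};

/// Build a two-instruction transaction: secp256k1 verification first, then a
/// hypothetical custom program that inspects the verified data.
fn build_secp256k1_tx(
    secp_secret_key: &libsecp256k1::SecretKey,
    message: &[u8],
    custom_program_id: Pubkey,
    custom_ix_data: &[u8],
    payer: &Keypair,
    recent_blockhash: Hash,
) -> Transaction {
    // Instruction 0: verified by the secp256k1 native program.
    let secp_ix = new_secp256k1_instruction(secp_secret_key, message);
    // Instruction 1: custom program; it would load instruction 0 through the
    // instructions sysvar and check the signed data matches its expectations.
    let custom_ix = Instruction::new_with_bytes(custom_program_id, custom_ix_data, vec![]);
    let mut tx = Transaction::new_with_payer(&[secp_ix, custom_ix], Some(&payer.pubkey()));
    tx.sign(&[payer], recent_blockhash);
    tx
}
```

The on-chain half of the pattern, reading instruction 0 from the instructions sysvar, is what the module's own examples walk through.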
2 changes: 1 addition & 1 deletion send-transaction-service/src/tpu_info.rs
@@ -6,7 +6,7 @@ use {
pub trait TpuInfo {
fn refresh_recent_peers(&mut self);
fn get_leader_tpus(&self, max_count: u64, protocol: Protocol) -> Vec<&SocketAddr>;
-/// In addition to the the tpu address, also return the leader slot
+/// In addition to the tpu address, also return the leader slot
fn get_leader_tpus_with_slots(
&self,
max_count: u64,
4 changes: 2 additions & 2 deletions tpu-client-next/src/connection_workers_scheduler.rs
@@ -174,7 +174,7 @@ impl ConnectionWorkersScheduler {
}
Err(err) => {
warn!("Connection to {new_leader} was closed, worker error: {err}");
-// If we has failed to send batch, it will be dropped.
+// If we have failed to send batch, it will be dropped.
}
}
}
@@ -234,7 +234,7 @@ impl ConnectionWorkersScheduler {
/// Splits `leaders` into two slices based on the `fanout` configuration:
/// * the first slice contains the leaders to which transactions will be sent,
/// * the second vector contains the leaders, used to warm up connections. This
-/// slice includes the the first set.
+/// slice includes the first set.
fn split_leaders<'leaders>(
leaders: &'leaders [SocketAddr],
fanout: &Fanout,
2 changes: 1 addition & 1 deletion wen-restart/src/wen_restart.rs
@@ -523,7 +523,7 @@ pub(crate) fn generate_snapshot(
};
// In very rare cases it's possible that the local root is not on the heaviest fork, so the
// validator generated snapshot for slots > local root. If the cluster agreed upon restart
-// slot my_heaviest_fork_slot is less than the the current highest full_snapshot_slot, that means the
+// slot my_heaviest_fork_slot is less than the current highest full_snapshot_slot, that means the
// locally rooted full_snapshot_slot will be rolled back. this requires human inspection。
//
// In even rarer cases, the selected slot might be the latest full snapshot slot. We could
