From 14bafdd1093ba357a601ae02c5e1587ac4739a06 Mon Sep 17 00:00:00 2001
From: Kleinmarb <104095495+Kleinmarb@users.noreply.github.com>
Date: Mon, 29 Jul 2024 22:34:49 +0200
Subject: [PATCH] Fix all typos I could find (#93)

* Fix all typos I could find

* Fix typos in README.md
---
 README.md                  | 4 ++--
 src/gxhash/mod.rs          | 2 +-
 src/gxhash/platform/arm.rs | 2 +-
 src/gxhash/platform/mod.rs | 4 ++--
 src/gxhash/platform/x86.rs | 2 +-
 src/hasher.rs              | 7 ++++---
 6 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index 3aabacb..dae13c6 100644
--- a/README.md
+++ b/README.md
@@ -78,7 +78,7 @@ The `hybrid` feature flag enables a hybrid implementation of GxHash. This is dis
 ## Benchmarks
 [![Benchmark](https://github.com/ogxd/gxhash/actions/workflows/bench.yml/badge.svg)](https://github.com/ogxd/gxhash/actions/workflows/bench.yml)
 
-GxHash is continuously benchmarked on X86 and ARM Github runners.
+GxHash is continuously benchmarked on X86 and ARM GitHub runners.
 
 To run the benchmarks locally use one of the following:
 ```bash
@@ -96,7 +96,7 @@ Throughput is measured as the number of bytes hashed per second.
 
 *Some prefer talking **latency** (time for generating a hash) or **hashrate** (the number of hashes generated per second) for measuring hash function performance, but those are all equivalent in the end as they all boil down to measuring the time it takes to hash some input and then apply different scalar transformation. For instance, if latency for a `4 bytes` hash is `1 ms`, then the throughput is `1 / 0.001 * 4 = 4000 bytes per second`. Throughput allows us to conveniently compare the performance of a hash function for any input size on a single graph.*
 
-**Lastest Benchmark Results:**
+**Latest Benchmark Results:**
 ![aarch64](./benches/throughput/aarch64.svg)
 ![x86_64](./benches/throughput/x86_64.svg)
 ![x86_64-hybrid](./benches/throughput/x86_64-hybrid.svg)
diff --git a/src/gxhash/mod.rs b/src/gxhash/mod.rs
index 7fff5ed..d5995e6 100644
--- a/src/gxhash/mod.rs
+++ b/src/gxhash/mod.rs
@@ -137,7 +137,7 @@ unsafe fn compress_many(mut ptr: *const State, end: usize, hash_vector: State, l
     let remaining_bytes = remaining_bytes - unrollable_blocks_count * VECTOR_SIZE;
     let end_address = ptr.add(remaining_bytes / VECTOR_SIZE) as usize;
 
-    // Process first individual blocks until we have an whole number of 8 blocks
+    // Process first individual blocks until we have a whole number of 8 blocks
     let mut hash_vector = hash_vector;
     while (ptr as usize) < end_address {
         load_unaligned!(ptr, v0);
diff --git a/src/gxhash/platform/arm.rs b/src/gxhash/platform/arm.rs
index c7a550e..083e6e8 100644
--- a/src/gxhash/platform/arm.rs
+++ b/src/gxhash/platform/arm.rs
@@ -77,7 +77,7 @@ pub unsafe fn compress_8(mut ptr: *const State, end_address: usize, hash_vector:
     let mut t2: State = create_empty();
 
     // Hash is processed in two separate 128-bit parallel lanes
-    // This allows the same processing to be applied using 256-bit V-AES instrinsics
+    // This allows the same processing to be applied using 256-bit V-AES intrinsics
     // so that hashes are stable in both cases.
     let mut lane1 = hash_vector;
     let mut lane2 = hash_vector;
diff --git a/src/gxhash/platform/mod.rs b/src/gxhash/platform/mod.rs
index 2dc7071..f40d676 100644
--- a/src/gxhash/platform/mod.rs
+++ b/src/gxhash/platform/mod.rs
@@ -11,7 +11,7 @@ pub use platform::*;
 use core::mem::size_of;
 
 pub(crate) const VECTOR_SIZE: usize = size_of::<State>();
-// 4KiB is the default page size for most systems, and conservative for other systems such as MacOS ARM (16KiB)
+// 4KiB is the default page size for most systems, and conservative for other systems such as macOS ARM (16KiB)
 const PAGE_SIZE: usize = 0x1000;
 
 #[inline(always)]
@@ -29,7 +29,7 @@ unsafe fn check_same_page(ptr: *const State) -> bool {
     let address = ptr as usize;
     // Mask to keep only the last 12 bits
     let offset_within_page = address & (PAGE_SIZE - 1);
-    // Check if the 16nd byte from the current offset exceeds the page boundary
+    // Check if the 16th byte from the current offset exceeds the page boundary
     offset_within_page < PAGE_SIZE - VECTOR_SIZE
 }
 
diff --git a/src/gxhash/platform/x86.rs b/src/gxhash/platform/x86.rs
index c85c521..a5735f1 100644
--- a/src/gxhash/platform/x86.rs
+++ b/src/gxhash/platform/x86.rs
@@ -74,7 +74,7 @@ pub unsafe fn compress_8(mut ptr: *const State, end_address: usize, hash_vector:
     let mut t2: State = create_empty();
 
     // Hash is processed in two separate 128-bit parallel lanes
-    // This allows the same processing to be applied using 256-bit V-AES instrinsics
+    // This allows the same processing to be applied using 256-bit V-AES intrinsics
     // so that hashes are stable in both cases.
     let mut lane1 = hash_vector;
     let mut lane2 = hash_vector;
diff --git a/src/hasher.rs b/src/hasher.rs
index 3010403..f8f72a3 100644
--- a/src/hasher.rs
+++ b/src/hasher.rs
@@ -6,10 +6,11 @@ use crate::gxhash::*;
 /// A `Hasher` for hashing an arbitrary stream of bytes.
 /// # Features
 /// - The fastest [`Hasher`] of its class<sup>1</sup>, for all input sizes
-/// - Highly collision resitant
+/// - Highly collision resistant
 /// - DOS resistance thanks to seed randomization when using [`GxHasher::default()`]
 ///
-/// *<sup>1</sup>There might me faster alternatives, such as `fxhash` for very small input sizes, but that usually have low quality properties.*
+/// *<sup>1</sup>There might be faster alternatives, such as `fxhash` for very small input sizes,
+/// but that usually have low quality properties.*
 #[derive(Clone, Debug)]
 pub struct GxHasher {
     state: State,
@@ -76,7 +77,7 @@ impl GxHasher {
         GxHasher::with_state(unsafe { create_seed(seed) })
     }
 
-    /// Finish this hasher and return the hashed value as a 128 bit
+    /// Finish this hasher and return the hashed value as a 128-bit
     /// unsigned integer.
     #[inline]
     pub fn finish_u128(&self) -> u128 {
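
For context on the `GxHasher` API whose doc comments are corrected in the last two hunks, here is a minimal usage sketch, assuming `gxhash` is declared as a dependency. The seed value, variable names, and `main` wrapper are illustrative; `with_seed`, the standard `Hasher::write`/`finish` methods, and `finish_u128` are taken from the doc comments and signatures shown above.

```rust
use std::hash::Hasher;

use gxhash::GxHasher;

fn main() {
    // A fixed seed gives reproducible hashes; per the docs above,
    // GxHasher::default() instead randomizes the seed for DoS resistance.
    let mut hasher = GxHasher::with_seed(1234);
    hasher.write(b"some bytes to hash");

    // The standard Hasher::finish returns a 64-bit value...
    let h64: u64 = hasher.finish();
    // ...while finish_u128 returns the hash as a 128-bit unsigned integer.
    let h128: u128 = hasher.finish_u128();

    println!("64-bit: {h64:x}, 128-bit: {h128:x}");
}
```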