From 404ca4ee45bc6130cbf86d9c61c87ad241177055 Mon Sep 17 00:00:00 2001
From: Jon Gjengset
Date: Sat, 19 Oct 2024 10:11:45 +0200
Subject: [PATCH] Release 0.6 with newer hashbrown and no mod raw

Fixes #21 so the crate will work on Rust 1.82.

We can't upgrade to `hashbrown` 0.15, since it removes the `raw` table
support. This is known upstream, and may just be something we have to
live with:
https://github.com/rust-lang/hashbrown/issues/545#issuecomment-2303232041

To align with `hashbrown`, and given this is a breaking change
regardless, we remove the `raw` module just like `hashbrown` has. This
should hopefully increase the chances that one day we can move to
`hashbrown`'s new lower-level interface (`HashTable`) without a
breaking change.

This release also removes the `nightly` feature, because it was mostly
useless.
---
 .github/workflows/check.yml           |   4 +-
 .github/workflows/safety.yml          |   6 -
 .github/workflows/test.yml            |  16 +--
 Cargo.toml                            |  13 +-
 README.md                             |   9 +-
 codecov.yml                           |  18 ---
 src/external_trait_impls/rayon/map.rs |  43 -------
 src/external_trait_impls/rayon/raw.rs |   4 +-
 src/lib.rs                            |  20 ----
 src/map.rs                            | 106 +----------------
 src/raw/mod.rs                        | 163 +++++++++-----------------
 src/set.rs                            |  26 +---
 12 files changed, 80 insertions(+), 348 deletions(-)
 delete mode 100644 codecov.yml

diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml
index b042b60..23f4a87 100644
--- a/.github/workflows/check.yml
+++ b/.github/workflows/check.yml
@@ -104,7 +104,7 @@ jobs:
       # intentionally no target specifier; see https://github.com/jonhoo/rust-ci-conf/pull/4
       # --feature-powerset runs for every combination of features
       - name: cargo hack
-        run: cargo hack --feature-powerset --exclude-features nightly check
+        run: cargo hack --feature-powerset check
   msrv:
     # check that we can build using the minimal rust version that is specified by this crate
     runs-on: ubuntu-latest
     # https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability
     strategy:
       matrix:
-        msrv: ["1.60.0"] # 2021 edition requires 1.56
+        msrv: ["1.63.0"]
     name: ubuntu / ${{ matrix.msrv }}
     steps:
       - uses: actions/checkout@v4
diff --git a/.github/workflows/safety.yml b/.github/workflows/safety.yml
index 170ae0a..0185feb 100644
--- a/.github/workflows/safety.yml
+++ b/.github/workflows/safety.yml
@@ -46,12 +46,6 @@ jobs:
         env:
           ASAN_OPTIONS: "detect_odr_violation=0:detect_leaks=0"
           RUSTFLAGS: "-Z sanitizer=address"
-      - name: cargo test -Zsanitizer=leak
-        if: always()
-        run: cargo test --all-features --target x86_64-unknown-linux-gnu
-        env:
-          LSAN_OPTIONS: "suppressions=lsan-suppressions.txt"
-          RUSTFLAGS: "-Z sanitizer=leak"
   miri:
     runs-on: ubuntu-latest
     steps:
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 7dc2f9f..f7540ae 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -39,10 +39,10 @@ jobs:
         run: cargo generate-lockfile
       # https://twitter.com/jonhoo/status/1571290371124260865
       - name: cargo test --locked
-        run: cargo test --locked --features serde,rayon,raw --all-targets
+        run: cargo test --locked --all-features --all-targets
       # https://github.com/rust-lang/cargo/issues/6669
       - name: cargo test --doc
-        run: cargo test --locked --features serde,rayon,raw --doc
+        run: cargo test --locked --all-features --doc
   minimal:
     # This action chooses the oldest version of the dependencies permitted by Cargo.toml to ensure
     # that this crate is compatible with the minimal version that this crate and its dependencies
@@ -81,7 +81,7 @@
       - name: cargo update
-Zminimal-versions run: cargo +nightly update -Zminimal-versions - name: cargo test - run: cargo test --locked --features serde,rayon,raw --all-targets + run: cargo test --locked --all-features --all-targets os-check: # run cargo test on mac and windows runs-on: ${{ matrix.os }} @@ -106,7 +106,7 @@ jobs: if: hashFiles('Cargo.lock') == '' run: cargo generate-lockfile - name: cargo test - run: cargo test --locked --features serde,rayon,raw --all-targets + run: cargo test --locked --all-features --all-targets coverage: # use llvm-cov to build and collect coverage and outputs in a format that # is compatible with codecov.io @@ -130,13 +130,13 @@ jobs: # # for lots of more discussion runs-on: ubuntu-latest - name: ubuntu / nightly / coverage + name: ubuntu / stable / coverage steps: - uses: actions/checkout@v4 with: submodules: true - - name: Install nightly - uses: dtolnay/rust-toolchain@nightly + - name: Install stable + uses: dtolnay/rust-toolchain@stable with: components: llvm-tools-preview - name: cargo install cargo-llvm-cov @@ -145,7 +145,7 @@ jobs: if: hashFiles('Cargo.lock') == '' run: cargo generate-lockfile - name: cargo llvm-cov - run: cargo llvm-cov --locked --features serde,rayon,raw --lcov --output-path lcov.info + run: cargo llvm-cov --locked --all-features --lcov --output-path lcov.info - name: Record Rust version run: echo "RUST=$(rustc --version)" >> "$GITHUB_ENV" - name: Upload to codecov.io diff --git a/Cargo.toml b/Cargo.toml index 3a441b9..fb8adb9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "griddle" -version = "0.5.2" +version = "0.6.0" authors = ["Jon Gjengset "] -edition = "2018" +edition = "2021" license = "MIT OR Apache-2.0" readme = "README.md" @@ -14,9 +14,9 @@ categories = ["data-structures", "no-std"] [dependencies] # For the default hasher -ahash_ = { version = "0.7.0", default-features = false, optional = true, package = "ahash" } +ahash_ = { version = "0.8.0", default-features = false, optional = true, package = "ahash" } -hashbrown = { version = "0.11.2", default-features = false, features = ["raw"] } +hashbrown = { version = "0.14.0", default-features = false, features = ["raw"] } # For external trait impls rayon_ = { version = "1.3.0", optional = true, package = "rayon" } @@ -37,17 +37,14 @@ ahash-compile-time-rng = ["ahash_/compile-time-rng"] ahash = [ "ahash_", "hashbrown/ahash" ] serde = [ "serde_", "hashbrown/serde" ] rayon = [ "rayon_", "hashbrown/rayon" ] -raw = [] # Enables usage of `#[inline]` on far more functions than by default in this # crate. This may lead to a performance increase but often comes at a compile # time cost. inline-more = [ "hashbrown/inline-more" ] -nightly = [] - [package.metadata.docs.rs] -features = ["rayon", "serde", "raw"] +features = ["rayon", "serde"] [[bench]] name = "vroom" diff --git a/README.md b/README.md index 027eeb4..dfdcc5b 100644 --- a/README.md +++ b/README.md @@ -80,18 +80,19 @@ work required to find the buckets that hold elements that must be moved. ## Implementation Griddle uses the -[`hashbrown::raw`](https://docs.rs/hashbrown/0.8/hashbrown/raw/index.html) +[`hashbrown::raw`](https://docs.rs/hashbrown/0.14/hashbrown/raw/index.html) API, which allows it to take advantage of all the awesome work that has gone into making `hashbrown` as fast as it is. The key different parts -of griddle live in `src/raw/mod.rs`. +of griddle live in `src/raw/mod.rs`. 
The `raw` API was [removed in
+`hashbrown` 0.15](https://github.com/rust-lang/hashbrown/issues/545), so
+Griddle is stuck on 0.14 for now.
 
 Griddle aims to stick as closely to `hashbrown` as it can, both in
 terms of code and API. `src/map.rs` and `src/set.rs` are virtually
 identical to the equivalent files in `hashbrown` (I encourage you to
 diff them!), with only some (currently;
 [#4](https://github.com/jonhoo/griddle/issues/4)) unsupported API
-removed. Like `hashbrown`, griddle exposes a `raw` module, which lets
-you build your own map implementation on top of griddle's table variant.
+removed.
 
 ## Why "griddle"?
 
diff --git a/codecov.yml b/codecov.yml
deleted file mode 100644
index 332a6f4..0000000
--- a/codecov.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Hold ourselves to a high bar
-coverage:
-  range: 85..100
-  round: down
-  precision: 2
-  status:
-    project:
-      default:
-        threshold: 1%
-
-# Tests aren't important for coverage
-ignore:
-  - "tests"
-
-# Make less noisy comments
-comment:
-  layout: "files"
-  require_changes: yes
diff --git a/src/external_trait_impls/rayon/map.rs b/src/external_trait_impls/rayon/map.rs
index 1020521..f998e0d 100644
--- a/src/external_trait_impls/rayon/map.rs
+++ b/src/external_trait_impls/rayon/map.rs
@@ -328,55 +328,12 @@ where
 #[cfg(test)]
 mod test_par_map {
     use alloc::vec::Vec;
-    use core::hash::{Hash, Hasher};
     use core::sync::atomic::{AtomicUsize, Ordering};
 
     use rayon_::prelude::*;
 
     use crate::hash_map::HashMap;
 
-    struct Dropable<'a> {
-        k: usize,
-        counter: &'a AtomicUsize,
-    }
-
-    impl Dropable<'_> {
-        fn new(k: usize, counter: &AtomicUsize) -> Dropable<'_> {
-            counter.fetch_add(1, Ordering::Relaxed);
-
-            Dropable { k, counter }
-        }
-    }
-
-    impl Drop for Dropable<'_> {
-        fn drop(&mut self) {
-            self.counter.fetch_sub(1, Ordering::Relaxed);
-        }
-    }
-
-    impl Clone for Dropable<'_> {
-        fn clone(&self) -> Self {
-            Dropable::new(self.k, self.counter)
-        }
-    }
-
-    impl Hash for Dropable<'_> {
-        fn hash<H>(&self, state: &mut H)
-        where
-            H: Hasher,
-        {
-            self.k.hash(state)
-        }
-    }
-
-    impl PartialEq for Dropable<'_> {
-        fn eq(&self, other: &Self) -> bool {
-            self.k == other.k
-        }
-    }
-
-    impl Eq for Dropable<'_> {}
-
     #[test]
     fn test_empty_iter() {
         let mut m: HashMap = HashMap::new();
diff --git a/src/external_trait_impls/rayon/raw.rs b/src/external_trait_impls/rayon/raw.rs
index 8f4db6a..31da1cd 100644
--- a/src/external_trait_impls/rayon/raw.rs
+++ b/src/external_trait_impls/rayon/raw.rs
@@ -6,7 +6,7 @@ use rayon_::iter::{
 };
 
 /// Parallel iterator which returns a raw pointer to every full bucket in the table.
-pub struct RawParIter<T> {
+pub(crate) struct RawParIter<T> {
     iter: hashbrown::raw::rayon::RawParIter<T>,
     leftovers: Option<hashbrown::raw::rayon::RawParIter<T>>,
 }
@@ -50,7 +50,7 @@ impl<T> ParallelIterator for RawParIter<T> {
 impl<T> RawTable<T> {
     /// Returns a parallel iterator over the elements in a `RawTable`.
     #[cfg_attr(feature = "inline-more", inline)]
-    pub unsafe fn par_iter(&self) -> RawParIter<T> {
+    pub(crate) unsafe fn par_iter(&self) -> RawParIter<T> {
         RawParIter {
             iter: self.main().par_iter(),
             leftovers: self.leftovers().map(|t| t.iter().into()),
diff --git a/src/lib.rs b/src/lib.rs
index d2ffdd5..657dff9 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -66,26 +66,6 @@ extern crate std;
 #[cfg_attr(test, macro_use)]
 extern crate alloc;
 
-#[cfg(feature = "raw")]
-/// Experimental and unsafe `RawTable` API. This module is only available if the
-/// `raw` feature is enabled.
-pub mod raw { - #[path = "mod.rs"] - mod inner; - pub use inner::*; - - #[cfg(feature = "rayon")] - /// [rayon]-based parallel iterator types for raw hash tables. - /// You will rarely need to interact with it directly unless you have need - /// to name one of the iterator types. - /// - /// [rayon]: https://docs.rs/rayon/1.0/rayon - pub mod rayon { - pub use crate::external_trait_impls::rayon::raw::*; - } -} -#[cfg(not(feature = "raw"))] -#[allow(dead_code)] mod raw; mod external_trait_impls; diff --git a/src/map.rs b/src/map.rs index 1206432..f054482 100644 --- a/src/map.rs +++ b/src/map.rs @@ -3166,12 +3166,10 @@ fn assert_covariance() { } #[cfg(test)] -#[cfg_attr(coverage, coverage(off))] mod test_map { use super::DefaultHashBuilder; use super::Entry::{Occupied, Vacant}; use super::{HashMap, RawEntryMut}; - use crate::TryReserveError::*; use rand::{rngs::SmallRng, Rng, SeedableRng}; use std::cell::RefCell; use std::usize; @@ -4327,11 +4325,11 @@ mod test_map { let key = "hello there"; let value = "value goes here"; assert!(a.is_empty()); - a.insert(key.clone(), value.clone()); + a.insert(key, value); assert_eq!(a.len(), 1); assert_eq!(a[key], value); - match a.entry(key.clone()) { + match a.entry(key) { Vacant(_) => panic!(), Occupied(e) => assert_eq!(key, *e.key()), } @@ -4346,11 +4344,11 @@ mod test_map { let value = "value goes here"; assert!(a.is_empty()); - match a.entry(key.clone()) { + match a.entry(key) { Occupied(_) => panic!(), Vacant(e) => { assert_eq!(key, *e.key()); - e.insert(value.clone()); + e.insert(value); } } assert_eq!(a.len(), 1); @@ -4446,102 +4444,6 @@ mod test_map { } } - #[test] - #[cfg_attr(miri, ignore)] // FIXME: no OOM signalling (https://github.com/rust-lang/miri/issues/613) - fn test_try_reserve() { - let mut empty_bytes: HashMap = HashMap::new(); - - const MAX_USIZE: usize = usize::MAX; - - if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { - } else { - panic!("usize::MAX should trigger an overflow!"); - } - - if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 8) { - } else { - // This may succeed if there is enough free memory. Attempt to - // allocate a second hashmap to ensure the allocation will fail. - let mut empty_bytes2: HashMap = HashMap::new(); - if let Err(AllocError { .. 
}) = empty_bytes2.try_reserve(MAX_USIZE / 8) {
-            } else {
-                panic!("usize::MAX / 8 should trigger an OOM!");
-            }
-        }
-    }
-
-    #[test]
-    #[cfg(feature = "raw")]
-    fn test_into_iter_refresh() {
-        use core::hash::{BuildHasher, Hash, Hasher};
-
-        #[cfg(miri)]
-        const N: usize = 32;
-        #[cfg(not(miri))]
-        const N: usize = 128;
-
-        let mut rng = rand::thread_rng();
-        for n in 0..N {
-            let mut m = HashMap::new();
-            for i in 0..n {
-                assert!(m.insert(i, 2 * i).is_none());
-            }
-            let hasher = m.hasher().clone();
-
-            let mut it = unsafe { m.table.iter() };
-            assert_eq!(it.len(), n);
-
-            let mut i = 0;
-            let mut left = n;
-            let mut removed = Vec::new();
-            loop {
-                // occasionally remove some elements
-                if i < n && rng.gen_bool(0.1) {
-                    let mut hsh = hasher.build_hasher();
-                    i.hash(&mut hsh);
-                    let hash = hsh.finish();
-
-                    unsafe {
-                        let e = m.table.find(hash, |q| q.0.eq(&i));
-                        if let Some(e) = e {
-                            it.reflect_remove(&e);
-                            let t = m.table.remove(e);
-                            removed.push(t);
-                            left -= 1;
-                        } else {
-                            assert!(removed.contains(&(i, 2 * i)), "{} not in {:?}", i, removed);
-                            let e = m.table.insert(
-                                hash,
-                                (i, 2 * i),
-                                super::make_hasher::(&hasher),
-                            );
-                            it.reflect_insert(&e);
-                            if let Some(p) = removed.iter().position(|e| e == &(i, 2 * i)) {
-                                removed.swap_remove(p);
-                            }
-                            left += 1;
-                        }
-                    }
-                }
-
-                let e = it.next();
-                if e.is_none() {
-                    break;
-                }
-                assert!(i < n);
-                let t = unsafe { e.unwrap().as_ref() };
-                assert!(!removed.contains(t));
-                let (k, v) = t;
-                assert_eq!(*v, 2 * k);
-                i += 1;
-            }
-            assert!(i <= n);
-
-            // just for safety:
-            assert_eq!(m.table.len(), left);
-        }
-    }
-
     #[test]
     fn test_raw_entry() {
         use super::RawEntryMut::{Occupied, Vacant};
diff --git a/src/raw/mod.rs b/src/raw/mod.rs
index 9019820..2cd5464 100644
--- a/src/raw/mod.rs
+++ b/src/raw/mod.rs
@@ -12,7 +12,7 @@ use hashbrown::{raw, TryReserveError};
 /// This is usually just a pointer to the element itself. However if the element
 /// is a ZST, then we instead track the index of the element in the table so
 /// that `erase` works properly.
-pub struct Bucket<T> {
+pub(crate) struct Bucket<T> {
     pub(crate) bucket: raw::Bucket<T>,
     pub(crate) in_main: bool,
 }
@@ -29,7 +29,7 @@ impl<T> Clone for Bucket<T> {
 
 impl<T> Bucket<T> {
     /// Returns true if this bucket is in the "old" table and will be moved.
-    pub fn will_move(&self) -> bool {
+    pub(crate) fn will_move(&self) -> bool {
         !self.in_main
     }
 }
@@ -47,7 +47,7 @@ impl<T> core::ops::Deref for Bucket<T> {
 /// resizing. When you interact with this API, keep in mind that there may be two backing tables,
 /// and a lookup may return a reference to _either_. Eventually, entries in the old table will be
 /// reclaimed, which invalidates any references to them.
-pub struct RawTable<T> {
+pub(crate) struct RawTable<T> {
     table: raw::RawTable<T>,
     leftovers: Option<OldTable<T>>,
 }
@@ -59,44 +59,25 @@ impl<T> RawTable<T> {
     /// Creates a new empty hash table without allocating any memory.
     ///
     /// In effect this returns a table with exactly 1 bucket. However we can
     /// leave the data pointer dangling since that bucket is never written to
     /// due to our load factor forcing us to always have at least 1 free bucket.
     #[cfg_attr(feature = "inline-more", inline)]
-    pub const fn new() -> Self {
+    pub(crate) const fn new() -> Self {
         Self {
             table: raw::RawTable::new(),
             leftovers: None,
         }
     }
 
-    /// Attempts to allocate a new hash table with at least enough capacity
-    /// for inserting the given number of elements without reallocating.
-    #[cfg(feature = "raw")]
-    pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
-        Ok(Self {
-            table: raw::RawTable::try_with_capacity(capacity)?,
-            leftovers: None,
-        })
-    }
-
     /// Allocates a new hash table with at least enough capacity for inserting
     /// the given number of elements without reallocating.
-    pub fn with_capacity(capacity: usize) -> Self {
+    pub(crate) fn with_capacity(capacity: usize) -> Self {
         Self {
             table: raw::RawTable::with_capacity(capacity),
             leftovers: None,
         }
     }
 
-    /// Returns a pointer to an element in the table.
-    #[cfg_attr(feature = "inline-more", inline)]
-    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
-        Bucket {
-            bucket: self.table.bucket(index),
-            in_main: true,
-        }
-    }
-
     /// Erases an element from the table, dropping it in place.
     #[cfg_attr(feature = "inline-more", inline)]
-    pub unsafe fn erase(&mut self, item: Bucket<T>) {
+    pub(crate) unsafe fn erase(&mut self, item: Bucket<T>) {
         if item.in_main {
             self.table.erase(item.bucket);
         } else if let Some(ref mut lo) = self.leftovers {
@@ -107,28 +88,14 @@
         }
     }
 
-    /// Finds and erases an element from the table, dropping it in place.
-    /// Returns true if an element was found.
-    #[cfg(feature = "raw")]
-    #[cfg_attr(feature = "inline-more", inline)]
-    pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
-        // Avoid `Option::map` because it bloats LLVM IR.
-        if let Some(bucket) = self.find(hash, eq) {
-            unsafe { self.erase(bucket) };
-            true
-        } else {
-            false
-        }
-    }
-
     /// Removes an element from the table, returning it.
     #[cfg_attr(feature = "inline-more", inline)]
-    pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
+    pub(crate) unsafe fn remove(&mut self, item: Bucket<T>) -> T {
         if item.in_main {
-            self.table.remove(item.bucket)
+            self.table.remove(item.bucket).0
         } else if let Some(ref mut lo) = self.leftovers {
             lo.items.reflect_remove(&item.bucket);
-            let v = lo.table.remove(item.bucket);
+            let (v, _) = lo.table.remove(item.bucket);
 
             if lo.table.len() == 0 {
                 let _ = self.leftovers.take();
@@ -142,7 +109,7 @@
 
     /// Finds and removes an element from the table, returning it.
     #[cfg_attr(feature = "inline-more", inline)]
-    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
+    pub(crate) fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
         // Avoid `Option::map` because it bloats LLVM IR.
         match self.find(hash, eq) {
             Some(bucket) => Some(unsafe { self.remove(bucket) }),
@@ -152,7 +119,7 @@
 
     /// Removes all elements from the table without freeing the backing memory.
     #[cfg_attr(feature = "inline-more", inline)]
-    pub fn clear(&mut self) {
+    pub(crate) fn clear(&mut self) {
         let _ = self.leftovers.take();
         self.table.clear();
     }
@@ -162,7 +129,7 @@
     /// In reality, the table may end up larger than `min_size`, as it must be able to hold all the
     /// current elements, as well as some additional elements due to incremental resizing.
     #[cfg_attr(feature = "inline-more", inline)]
-    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
+    pub(crate) fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
         // Calculate the minimal number of elements that we need to reserve
         // space for.
         let mut need = self.table.len();
@@ -185,7 +152,7 @@
     ///
     /// While we try to make this incremental where possible, it may require all-at-once resizing.
#[cfg_attr(feature = "inline-more", inline)] - pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { + pub(crate) fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { let need = self.leftovers.as_ref().map_or(0, |t| t.table.len()) + additional; if self.table.capacity() - self.table.len() > need { // We can accommodate the additional items without resizing, so all is well. @@ -224,7 +191,7 @@ impl RawTable { /// /// While we try to make this incremental where possible, it may require all-at-once resizing. #[cfg_attr(feature = "inline-more", inline)] - pub fn try_reserve( + pub(crate) fn try_reserve( &mut self, additional: usize, hasher: impl Fn(&T) -> u64, @@ -260,7 +227,7 @@ impl RawTable { /// /// This does not check if the given element already exists in the table. #[cfg_attr(feature = "inline-more", inline)] - pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket { + pub(crate) fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket { if self.table.capacity() == self.table.len() { assert!(self.leftovers.is_none()); // Even though this _may_ succeed without growing due to tombstones, handling @@ -269,14 +236,20 @@ impl RawTable { return self.insert(hash, value, hasher); } - self.insert_no_grow(hash, value, hasher) + // SAFETY: unknown requirements, but _was_ safe + unsafe { self.insert_no_grow(hash, value, hasher) } } /// Inserts a new element into the table, and returns a mutable reference to it. /// /// This does not check if the given element already exists in the table. #[cfg_attr(feature = "inline-more", inline)] - pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T { + pub(crate) fn insert_entry( + &mut self, + hash: u64, + value: T, + hasher: impl Fn(&T) -> u64, + ) -> &mut T { unsafe { self.insert(hash, value, hasher).as_mut() } } @@ -290,8 +263,14 @@ impl RawTable { /// This is because while the insert won't grow the table, it may need to carry over some /// elements from the pre-resize table to the current table, which requires re-hashing. #[cfg_attr(feature = "inline-more", inline)] - pub fn insert_no_grow(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket { - let bucket = self.table.insert_no_grow(hash, value); + pub(crate) unsafe fn insert_no_grow( + &mut self, + hash: u64, + value: T, + hasher: impl Fn(&T) -> u64, + ) -> Bucket { + // SAFETY: unknown requirements, but mirrored to caller + let bucket = unsafe { self.table.insert_no_grow(hash, value) }; if self.leftovers.is_some() { // Also carry some items over. @@ -311,7 +290,7 @@ impl RawTable { /// /// This does not check if the given bucket is actually occupied. #[cfg_attr(feature = "inline-more", inline)] - pub unsafe fn replace_bucket_with(&mut self, bucket: Bucket, f: F) -> bool + pub(crate) unsafe fn replace_bucket_with(&mut self, bucket: Bucket, f: F) -> bool where F: FnOnce(T) -> Option, { @@ -334,7 +313,7 @@ impl RawTable { /// Searches for an element in the table. #[inline] - pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option> { + pub(crate) fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option> { let e = self.table.find(hash, &mut eq); if let Some(bucket) = e { return Some(Bucket { @@ -355,7 +334,7 @@ impl RawTable { /// Gets a reference to an element in the table. 
#[inline] - pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { + pub(crate) fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { // Avoid `Option::map` because it bloats LLVM IR. match self.find(hash, eq) { Some(bucket) => Some(unsafe { bucket.as_ref() }), @@ -365,7 +344,7 @@ impl RawTable { /// Gets a mutable reference to an element in the table. #[inline] - pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> { + pub(crate) fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> { // Avoid `Option::map` because it bloats LLVM IR. match self.find(hash, eq) { Some(bucket) => Some(unsafe { bucket.as_mut() }), @@ -378,19 +357,19 @@ impl RawTable { /// This number is a lower bound; the table might be able to hold /// more, but is guaranteed to be able to hold at least this many. #[cfg_attr(feature = "inline-more", inline)] - pub fn capacity(&self) -> usize { + pub(crate) fn capacity(&self) -> usize { self.table.capacity() } /// Returns the number of elements in the table. #[cfg_attr(feature = "inline-more", inline)] - pub fn len(&self) -> usize { + pub(crate) fn len(&self) -> usize { self.table.len() + self.leftovers.as_ref().map_or(0, |t| t.table.len()) } /// Returns the number of buckets in the table. - #[cfg_attr(feature = "inline-more", inline)] - pub fn buckets(&self) -> usize { + #[cfg(test)] + pub(crate) fn buckets(&self) -> usize { self.table.buckets() } @@ -399,7 +378,7 @@ impl RawTable { /// Because we cannot make the `next` method unsafe on the `RawIter` /// struct, we have to make the `iter` method unsafe. #[cfg_attr(feature = "inline-more", inline)] - pub unsafe fn iter(&self) -> RawIter { + pub(crate) unsafe fn iter(&self) -> RawIter { RawIter { table: self.table.iter(), leftovers: self.leftovers.as_ref().map(|lo| lo.items.clone()), @@ -409,7 +388,7 @@ impl RawTable { /// Returns an iterator which removes all elements from the table without /// freeing the memory. #[cfg_attr(feature = "inline-more", inline)] - pub fn drain(&mut self) -> RawDrain<'_, T> { + pub(crate) fn drain(&mut self) -> RawDrain<'_, T> { RawDrain { table: self.table.drain(), leftovers: self @@ -425,7 +404,7 @@ impl RawTable { /// /// It is up to the caller to ensure that the iterator is valid for this /// `RawTable` and covers all items that remain in the table. - pub unsafe fn into_iter_from(self, iter: RawIter) -> RawIntoIter { + pub(crate) unsafe fn into_iter_from(self, iter: RawIter) -> RawIntoIter { RawIntoIter { table: self.table.into_iter_from(iter.table), leftovers: self.leftovers.map(|lo| lo.table.into_iter_from(lo.items)), @@ -449,7 +428,7 @@ fn and_carry_with_hasher( impl RawTable { /// Variant of `clone_from` to use when a hasher is available. - pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) { + pub(crate) fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) { let _ = self.leftovers.take(); self.table.clone_from_with_hasher(&source.table, &hasher); // Since we're doing the work of cloning anyway, we might as well carry the leftovers. @@ -457,7 +436,7 @@ impl RawTable { } /// Variant of `clone` to use when a hasher is available. - pub fn clone_with_hasher(&self, hasher: impl Fn(&T) -> u64) -> Self { + pub(crate) fn clone_with_hasher(&self, hasher: impl Fn(&T) -> u64) -> Self { let mut table = self.table.clone(); // Since we're doing the work of cloning anyway, we might as well carry the leftovers. 
and_carry_with_hasher(&mut table, &self.leftovers, hasher);
@@ -531,7 +510,7 @@ impl<T> RawTable<T> {
             while let Some(e) = lo.items.next() {
                 // We need to remove the item in this bucket from the old map
                 // to the resized map, without shrinking the old map.
-                let value = unsafe { lo.table.remove(e) };
+                let (value, _) = unsafe { lo.table.remove(e) };
                 let hash = hasher(&value);
                 self.table.insert(hash, value, &hasher);
             }
@@ -555,9 +534,10 @@ impl<T> RawTable<T> {
             if let Some(e) = lo.items.next() {
                 // We need to remove the item in this bucket from the old map
                 // to the resized map, without shrinking the old map.
-                let value = unsafe { lo.table.remove(e) };
+                let (value, _) = unsafe { lo.table.remove(e) };
                 let hash = hasher(&value);
-                self.table.insert_no_grow(hash, value);
+                // SAFETY: unknown requirements, but _was_ safe
+                unsafe { self.table.insert_no_grow(hash, value) };
             } else {
                 // The resize is finally fully complete.
                 let _ = self.leftovers.take();
@@ -620,48 +600,11 @@ struct OldTable<T> {
 /// created will be yielded by that iterator (unless `reflect_insert` is called).
 /// - The order in which the iterator yields buckets is unspecified and may
 ///   change in the future.
-pub struct RawIter<T> {
+pub(crate) struct RawIter<T> {
     table: raw::RawIter<T>,
     leftovers: Option<raw::RawIter<T>>,
 }
 
-impl<T> RawIter<T> {
-    /// Refresh the iterator so that it reflects a removal from the given bucket.
-    ///
-    /// For the iterator to remain valid, this method must be called once
-    /// for each removed bucket before `next` is called again.
-    ///
-    /// This method should be called _before_ the removal is made. It is not necessary to call this
-    /// method if you are removing an item that this iterator yielded in the past.
-    #[cfg(feature = "raw")]
-    pub fn reflect_remove(&mut self, b: &Bucket<T>) {
-        if b.in_main {
-            self.table.reflect_remove(b)
-        } else if let Some(ref mut lo) = self.leftovers {
-            // NOTE: The actual call to erase/remove will take care of calling reflect_remove in
-            // the "main" leftover's iterator.
-            lo.reflect_remove(b)
-        } else {
-            unreachable!("invalid bucket state");
-        }
-    }
-
-    /// Refresh the iterator so that it reflects an insertion into the given bucket.
-    ///
-    /// For the iterator to remain valid, this method must be called once
-    /// for each insert before `next` is called again.
-    ///
-    /// This method does not guarantee that an insertion of a bucket with a greater
-    /// index than the last one yielded will be reflected in the iterator.
-    ///
-    /// This method should be called _after_ the given insert is made.
-    #[cfg(feature = "raw")]
-    pub fn reflect_insert(&mut self, b: &Bucket<T>) {
-        assert!(b.in_main, "no insertion can happen into leftovers");
-        self.table.reflect_insert(b)
-    }
-}
-
 impl<T> Clone for RawIter<T> {
     #[cfg_attr(feature = "inline-more", inline)]
     fn clone(&self) -> Self {
@@ -710,7 +653,7 @@ impl<T> ExactSizeIterator for RawIter<T> {}
 impl<T> FusedIterator for RawIter<T> {}
 
 /// Iterator which consumes a table and returns elements.
-pub struct RawIntoIter<T> {
+pub(crate) struct RawIntoIter<T> {
     table: raw::RawIntoIter<T>,
     leftovers: Option<raw::RawIntoIter<T>>,
 }
 
 impl<T> RawIntoIter<T> {
     /// Returns a by-reference iterator over the remaining items of this iterator.
#[cfg_attr(feature = "inline-more", inline)] - pub fn iter(&self) -> RawIter { + pub(crate) fn iter(&self) -> RawIter { RawIter { table: self.table.iter(), leftovers: self.leftovers.as_ref().map(|lo| lo.iter()), @@ -751,7 +694,7 @@ impl ExactSizeIterator for RawIntoIter {} impl FusedIterator for RawIntoIter {} /// Iterator which consumes elements without freeing the table storage. -pub struct RawDrain<'a, T> { +pub(crate) struct RawDrain<'a, T> { table: raw::RawDrain<'a, T>, leftovers: Option>, } @@ -759,7 +702,7 @@ pub struct RawDrain<'a, T> { impl RawDrain<'_, T> { /// Returns a by-reference iterator over the remaining items of this iterator. #[cfg_attr(feature = "inline-more", inline)] - pub fn iter(&self) -> RawIter { + pub(crate) fn iter(&self) -> RawIter { RawIter { table: self.table.iter(), leftovers: self.leftovers.as_ref().map(|lo| lo.iter()), diff --git a/src/set.rs b/src/set.rs index 530f0d3..b4e7a47 100644 --- a/src/set.rs +++ b/src/set.rs @@ -1033,18 +1033,6 @@ where fn extend>(&mut self, iter: I) { self.map.extend(iter.into_iter().map(|k| (k, ()))); } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_one(&mut self, k: T) { - self.map.insert(k, ()); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_reserve(&mut self, additional: usize) { - Extend::<(T, ())>::extend_reserve(&mut self.map, additional); - } } impl<'a, T, S> Extend<&'a T> for HashSet @@ -1056,18 +1044,6 @@ where fn extend>(&mut self, iter: I) { self.extend(iter.into_iter().cloned()); } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_one(&mut self, k: &'a T) { - self.map.insert(*k, ()); - } - - #[inline] - #[cfg(feature = "nightly")] - fn extend_reserve(&mut self, additional: usize) { - Extend::<(T, ())>::extend_reserve(&mut self.map, additional); - } } impl Default for HashSet @@ -1709,7 +1685,6 @@ fn assert_covariance() { } #[cfg(test)] -#[cfg_attr(coverage, coverage(off))] mod test_set { use super::super::map::DefaultHashBuilder; use super::HashSet; @@ -2028,6 +2003,7 @@ mod test_set { use core::hash; #[derive(Debug)] + #[allow(dead_code)] struct Foo(&'static str, i32); impl PartialEq for Foo {