perf: Use papaya over dashmap
XAMPPRocky committed Nov 25, 2024
1 parent 57040f8 commit 787de96
Showing 14 changed files with 294 additions and 497 deletions.
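
For context on the API shift in the diff below: dashmap hands out per-entry guards (`kv.key()` / `kv.value()`), while papaya routes every read through a `pin()` guard whose iterator yields plain `(&K, &V)` borrows. A minimal sketch with illustrative types, not quilkin's real `ClusterMap`:

```rust
use papaya::HashMap;

fn main() {
    // Illustrative stand-in for the maps touched in this commit.
    let clusters: HashMap<String, Vec<u16>> = HashMap::new();
    clusters.pin().insert("local".into(), vec![7777, 7778]);

    // All access goes through a pin() guard; iteration yields (&K, &V)
    // tuples rather than dashmap-style entry guards.
    let guard = clusters.pin();
    for (name, ports) in guard.iter() {
        println!("{name}: {} endpoints", ports.len());
    }
    assert_eq!(guard.get("local").map(Vec::len), Some(2));
}
```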
86 changes: 86 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions Cargo.toml
@@ -158,6 +158,8 @@ libflate = "2.1.0"
 form_urlencoded = "1.2.1"
 enum_dispatch = "0.3.13"
 gxhash = "3.4.1"
+papaya = { version = "0.1.3", features = ["serde"] }
+seize = "0.4.5"

 [dependencies.hyper-util]
 version = "0.1"
11 changes: 3 additions & 8 deletions benches/cluster_map.rs
@@ -17,10 +17,10 @@ mod serde {
     fn serialize_to_protobuf(cm: &ClusterMap) -> Vec<Any> {
         let mut resources = Vec::new();

-        for cluster in cm.iter() {
+        for (key, cluster) in cm.pin().iter() {
             resources.push(
                 Resource::Cluster(Cluster {
-                    locality: cluster.key().clone().map(From::from),
+                    locality: key.clone().map(From::from),
                     endpoints: cluster
                         .endpoints
                         .iter()
@@ -110,12 +110,7 @@ mod ops {
     use shared::{gen_cluster_map, GenCluster};

     fn compute_hash<const S: u64>(gc: &GenCluster) -> usize {
-        let mut total_endpoints = 0;
-
-        for kv in gc.cm.iter() {
-            total_endpoints += kv.endpoints.len();
-        }
-
+        let total_endpoints = gc.cm.pin().values().map(|v| v.endpoints.len()).sum();
         assert_eq!(total_endpoints, gc.total_endpoints);
         total_endpoints
     }
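
The cluster_map changes in isolation, sketched against a plain `papaya::HashMap` with made-up key/value types rather than the real `ClusterMap`:

```rust
use papaya::HashMap;

// Illustrative stand-in: locality name -> endpoint addresses.
type Clusters = HashMap<String, Vec<String>>;

fn total_endpoints(cm: &Clusters) -> usize {
    // Before (dashmap): `for kv in cm.iter() { total += kv.value().len(); }`
    // After (papaya): one guarded pass; `values()` borrows through the pin.
    cm.pin().values().map(|endpoints| endpoints.len()).sum()
}

fn localities(cm: &Clusters) -> Vec<String> {
    // `iter()` now yields `(&K, &V)` tuples instead of entry guards with `.key()`.
    cm.pin().iter().map(|(key, _)| key.clone()).collect()
}
```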
2 changes: 1 addition & 1 deletion benches/shared.rs
@@ -676,7 +676,7 @@ pub fn gen_cluster_map<const S: u64>(token_kind: TokenKind) -> GenCluster {

     // Now actually insert the endpoints, now that the order of keys is established,
     // annoying, but note we split out iteration versus insertion, otherwise we deadlock
-    let keys: Vec<_> = cm.iter().map(|kv| kv.key().clone()).collect();
+    let keys: Vec<_> = cm.pin().iter().map(|(key, _)| key.clone()).collect();
     let mut sets = std::collections::BTreeMap::new();

     let mut token_generator = match token_kind {
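
The deadlock comment above is carried over from the dashmap version, where iterating holds shard locks and inserting mid-iteration could deadlock; the two-pass shape is kept after the migration. A hedged sketch of that pattern with a plain papaya map and a hypothetical helper:

```rust
use papaya::HashMap;

// Hypothetical helper mirroring the bench: snapshot the keys in one pass,
// then mutate in a second pass, never inserting while iterating.
fn bump_each_entry(map: &HashMap<String, usize>) {
    // Pass 1: iterate under one pin and copy the keys out.
    let keys: Vec<String> = map.pin().iter().map(|(key, _)| key.clone()).collect();

    // Pass 2: mutate under a fresh pin, with no iteration in flight.
    let guard = map.pin();
    for key in keys {
        if let Some(v) = guard.get(&key) {
            guard.insert(key, *v + 1);
        }
    }
}
```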
4 changes: 2 additions & 2 deletions benches/token_router.rs
@@ -14,8 +14,8 @@ fn token_router(b: Bencher, token_kind: &str) {
     let cm = std::sync::Arc::new(gc.cm);

     // Calculate the amount of bytes for all the tokens
-    for eps in cm.iter() {
-        for ep in &eps.value().endpoints {
+    for eps in cm.pin().values() {
+        for ep in &eps.endpoints {
             for tok in &ep.metadata.known.tokens {
                 tokens.push(tok.clone());
             }
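
The same shape as the loop above in isolation: `pin().values()` yields `&V` directly, so dashmap's `.value()` accessor disappears. Types here are illustrative:

```rust
use papaya::HashMap;

// Illustrative: cluster name -> routing tokens for that cluster's endpoints.
fn collect_tokens(cm: &HashMap<String, Vec<Vec<u8>>>) -> Vec<Vec<u8>> {
    let mut tokens = Vec::new();
    for cluster_tokens in cm.pin().values() {
        // With dashmap this was reached via `eps.value()`; papaya hands out `&V`.
        tokens.extend(cluster_tokens.iter().cloned());
    }
    tokens
}
```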
5 changes: 3 additions & 2 deletions crates/test/tests/mesh.rs
@@ -189,8 +189,9 @@ trace_test!(datacenter_discovery, {
     #[track_caller]
     fn assert_config(config: &quilkin::Config, datacenter: &quilkin::config::Datacenter) {
         let dcs = config.datacenters().read();
-        let ipv4_dc = dcs.get(&std::net::Ipv4Addr::LOCALHOST.into());
-        let ipv6_dc = dcs.get(&std::net::Ipv6Addr::LOCALHOST.into());
+        let pin = dcs.pin();
+        let ipv4_dc = pin.get(&std::net::Ipv4Addr::LOCALHOST.into());
+        let ipv6_dc = pin.get(&std::net::Ipv6Addr::LOCALHOST.into());

         match (ipv4_dc, ipv6_dc) {
             (Some(dc), None) => assert_eq!(&*dc, datacenter),
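
The new `let pin = dcs.pin();` binding matters because the references returned by `get` borrow from that guard; a throwaway `dcs.pin().get(..)` would not live long enough to inspect afterwards. A hedged standalone version of the same shape, with an illustrative map type in place of the real datacenter map:

```rust
use papaya::HashMap;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

// Illustrative stand-in for `config.datacenters()`: address -> datacenter name.
fn assert_localhost_dc(dcs: &HashMap<IpAddr, String>, expected: &str) {
    // Bind the guard to a local, as the test does, so the `&String` results
    // from both lookups stay valid while they are compared.
    let pin = dcs.pin();
    let ipv4_dc = pin.get(&IpAddr::from(Ipv4Addr::LOCALHOST));
    let ipv6_dc = pin.get(&IpAddr::from(Ipv6Addr::LOCALHOST));

    match (ipv4_dc, ipv6_dc) {
        (Some(dc), None) | (None, Some(dc)) => assert_eq!(dc, expected),
        (Some(_), Some(_)) => panic!("registered under both address families"),
        (None, None) => panic!("no datacenter registered for localhost"),
    }
}
```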
1 change: 1 addition & 0 deletions deny.toml
@@ -60,6 +60,7 @@ allow = ["Apache-2.0", "MIT", "ISC", "BSD-3-Clause", "Unicode-3.0"]
 exceptions = [
     { crate = "adler32", allow = ["Zlib"] },
     { crate = "foldhash", allow = ["Zlib"] },
+    { crate = "atomic-wait", allow = ["BSD-2-Clause"] },
     # This license should not really be used for code, but here we are
     { crate = "notify", allow = ["CC0-1.0"] },
     { crate = "ring", allow = ["OpenSSL"] },