From e1792d7f25ced3906bf21ea6ef1fa6137e7f4d00 Mon Sep 17 00:00:00 2001
From: Sebastiano Vigna
Date: Wed, 1 Nov 2023 14:36:39 +0100
Subject: [PATCH] pr -> pl

---
 Cargo.toml                              |  2 +-
 README.md                               |  5 +++
 examples/bv_bf_visit.rs                 | 11 ++++---
 src/algorithms/bfs_order.rs             | 13 ++++----
 src/algorithms/llp.rs                   | 41 +++++++++++++------------
 src/algorithms/simplify.rs              |  6 ++--
 src/algorithms/transpose.rs             |  6 ++--
 src/bin/ascii_convert.rs                | 10 +++---
 src/bin/build_eliasfano.rs              | 33 +++++++++++---------
 src/bin/build_offsets.rs                | 17 +++++-----
 src/bin/optimize_codes.rs               | 15 ++++-----
 src/bin/perm.rs                         | 22 ++++++-------
 src/bin/test_eliasfano.rs               | 26 ++++++++--------
 src/graph/bvgraph/bvgraph_writer_par.rs | 17 +++++-----
 src/traits/graph.rs                     | 12 ++++----
 tests/test_bvcomp.rs                    | 10 +++---
 tests/test_par_bvcomp.rs                |  9 +++---
 17 files changed, 138 insertions(+), 117 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index a4111835..b419b19c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -29,7 +29,7 @@ sux = {git = "https://github.com/vigna/sux-rs" }
 dsi-bitstream = {git = "https://github.com/vigna/dsi-bitstream-rs" }
 # Bin dependancies
 clap = { version = "4.1.6", features = ["derive"] }
-dsi-progress-logger = "0.1.0"
+dsi-progress-logger = "0.2.0"
 log = "0.4.17"
 stderrlog = "0.5.4"
 rand = { version = "0.8.5", features = ["small_rng"] }
diff --git a/README.md b/README.md
index c46bfeed..14b5ba30 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,8 @@
 # `webgraph`
 
 A pure Rust implementation of the [WebGraph framework](https://webgraph.di.unimi.it/) for graph compression.
+
+# Acknowledgments
+
+This software has been partially supported by project SERICS (PE00000014) under the NRRP MUR program funded by the EU - NGEU,
+and by project ANR COREGRAPHIE, grant ANR-20-CE23-0002 of the French Agence Nationale de la Recherche.
diff --git a/examples/bv_bf_visit.rs b/examples/bv_bf_visit.rs
index c508b0a5..4a96e0cb 100644
--- a/examples/bv_bf_visit.rs
+++ b/examples/bv_bf_visit.rs
@@ -8,7 +8,7 @@
 use anyhow::Result;
 use bitvec::*;
 use clap::Parser;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use std::collections::VecDeque;
 use webgraph::prelude::*;
 #[derive(Parser, Debug)]
@@ -32,10 +32,11 @@ pub fn main() -> Result<()> {
     let mut visited = bitvec![0; num_nodes];
     let mut queue = VecDeque::new();
 
-    let mut pl = ProgressLogger::default().display_memory();
-    pl.item_name = "node";
-    pl.local_speed = true;
-    pl.expected_updates = Some(num_nodes);
+    let mut pl = ProgressLogger::default();
+    pl.display_memory(true)
+        .item_name("node")
+        .local_speed(true)
+        .expected_updates(Some(num_nodes));
 
     pl.start("Visiting graph...");
     for start in 0..num_nodes {
diff --git a/src/algorithms/bfs_order.rs b/src/algorithms/bfs_order.rs
index e11c08a5..2fae1967 100644
--- a/src/algorithms/bfs_order.rs
+++ b/src/algorithms/bfs_order.rs
@@ -5,14 +5,14 @@
  */
 
 use crate::traits::RandomAccessGraph;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use std::collections::VecDeque;
 use sux::prelude::BitVec;
 
 /// Iterator on all nodes of the graph in a BFS order
 pub struct BfsOrder<'a, G: RandomAccessGraph> {
     graph: &'a G,
-    pl: ProgressLogger<'static>,
+    pl: ProgressLogger,
     visited: BitVec,
     queue: VecDeque,
     /// If the queue is empty, resume the BFS from that node.
@@ -25,10 +25,11 @@ pub struct BfsOrder<'a, G: RandomAccessGraph> {
 impl<'a, G: RandomAccessGraph> BfsOrder<'a, G> {
     pub fn new(graph: &G) -> BfsOrder {
         let num_nodes = graph.num_nodes();
-        let mut pl = ProgressLogger::default().display_memory();
-        pl.item_name = "node";
-        pl.local_speed = true;
-        pl.expected_updates = Some(num_nodes);
+        let mut pl = ProgressLogger::default();
+        pl.display_memory(true)
+            .item_name("node")
+            .local_speed(true)
+            .expected_updates(Some(num_nodes));
         pl.start("Visiting graph in BFS order...");
         BfsOrder {
             graph,
diff --git a/src/algorithms/llp.rs b/src/algorithms/llp.rs
index 89a1a9b9..da52b4c8 100644
--- a/src/algorithms/llp.rs
+++ b/src/algorithms/llp.rs
@@ -8,7 +8,7 @@
 use crate::prelude::*;
 use crate::{invert_in_place, traits::*};
 use anyhow::Result;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use epserde::prelude::*;
 use lender::*;
 
@@ -54,28 +54,31 @@ pub fn layered_label_propagation(
         .build()?;
 
     // init the gamma progress logger
-    let mut gamma_pr = ProgressLogger::default().display_memory();
-    gamma_pr.item_name = "gamma";
-    gamma_pr.expected_updates = Some(gammas.len());
+    let mut gamma_pl = ProgressLogger::default();
+    gamma_pl
+        .display_memory(true)
+        .item_name("gamma")
+        .expected_updates(Some(gammas.len()));
 
     // init the iteration progress logger
-    let mut iter_pr = ProgressLogger::default();
-    iter_pr.item_name = "update";
+    let mut iter_pl = ProgressLogger::default();
+    iter_pl.item_name("update");
 
     // init the update progress logger
-    let mut update_pr = ProgressLogger::default();
-    update_pr.item_name = "node";
-    update_pr.local_speed = true;
-    update_pr.expected_updates = Some(num_nodes);
+    let mut update_pl = ProgressLogger::default();
+    update_pl
+        .item_name("node")
+        .local_speed(true)
+        .expected_updates(Some(num_nodes));
 
     let seed = AtomicU64::new(seed);
     let mut costs = Vec::with_capacity(gammas.len());
 
-    gamma_pr.start(format!("Running {} threads", num_threads));
+    gamma_pl.start(format!("Running {} threads", num_threads));
 
     for (gamma_index, gamma) in gammas.iter().enumerate() {
         // Reset mutable state for the next gamma
-        iter_pr.start(format!(
+        iter_pl.start(format!(
             "Starting iterations with gamma={} ({}/{})...",
             gamma,
             gamma_index + 1,
@@ -88,7 +91,7 @@ pub fn layered_label_propagation(
             .for_each(|x| x.store(true, Ordering::Relaxed));
 
         for i in 0..max_iters {
-            update_pr.start(format!("Starting update {}...", i));
+            update_pl.start(format!("Starting update {}...", i));
             update_perm.iter_mut().enumerate().for_each(|(i, x)| *x = i);
 
             thread_pool.install(|| {
@@ -178,11 +181,11 @@ pub fn layered_label_propagation(
                 |delta_obj_func_0, delta_obj_func_1| delta_obj_func_0 + delta_obj_func_1,
                 &thread_pool,
                 granularity,
-                Some(&mut update_pr),
+                Some(&mut update_pl),
             );
 
-            update_pr.done_with_count(num_nodes);
-            iter_pr.update_and_display();
+            update_pl.done_with_count(num_nodes);
+            iter_pl.update_and_display();
 
             obj_func += delta_obj_func;
             let gain = delta_obj_func / obj_func;
@@ -195,7 +198,7 @@ pub fn layered_label_propagation(
             }
         }
 
-        iter_pr.done();
+        iter_pl.done();
 
         update_perm.iter_mut().enumerate().for_each(|(i, x)| *x = i);
         // create sorted clusters by contiguous labels
@@ -220,10 +223,10 @@ pub fn layered_label_propagation(
 
         let mut file = std::fs::File::create(format!("labels_{}.bin", gamma_index))?;
         labels.serialize(&mut file)?;
-        gamma_pr.update_and_display();
+        gamma_pl.update_and_display();
     }
 
-    gamma_pr.done();
+    gamma_pl.done();
 
     // compute the indices that sorts the gammas by cost
     let mut gamma_indices = (0..costs.len()).collect::>();
diff --git a/src/algorithms/simplify.rs b/src/algorithms/simplify.rs
index 18fe8932..f213583b 100644
--- a/src/algorithms/simplify.rs
+++ b/src/algorithms/simplify.rs
@@ -8,7 +8,7 @@ use crate::graph::arc_list_graph;
 use crate::traits::SequentialGraph;
 use crate::utils::{BatchIterator, KMergeIters, SortPairs};
 use anyhow::Result;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use itertools::{Dedup, Itertools};
 use lender::*;
 /// Make the graph undirected and remove selfloops
@@ -33,8 +33,8 @@ pub fn simplify(
 
     let mut sorted = SortPairs::new(batch_size, dir.into_path())?;
     let mut pl = ProgressLogger::default();
-    pl.item_name = "node";
-    pl.expected_updates = Some(graph.num_nodes());
+    pl.item_name("node")
+        .expected_updates(Some(graph.num_nodes()));
     pl.start("Creating batches...");
     // create batches of sorted edges
     let mut iter = graph.iter();
diff --git a/src/algorithms/transpose.rs b/src/algorithms/transpose.rs
index 4d44b35b..c7947ab0 100644
--- a/src/algorithms/transpose.rs
+++ b/src/algorithms/transpose.rs
@@ -9,7 +9,7 @@ use crate::graph::arc_list_graph;
 use crate::traits::SequentialGraph;
 use crate::utils::{BatchIterator, KMergeIters, SortPairs};
 use anyhow::Result;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use lender::*;
 /// Create transpose the graph and return a sequential graph view of it
 #[allow(clippy::type_complexity)]
@@ -25,8 +25,8 @@ pub fn transpose(
 
     let mut sorted = SortPairs::new(batch_size, dir.into_path())?;
     let mut pl = ProgressLogger::default();
-    pl.item_name = "node";
-    pl.expected_updates = Some(graph.num_nodes());
+    pl.item_name("node")
+        .expected_updates(Some(graph.num_nodes()));
     pl.start("Creating batches...");
     // create batches of sorted edges
     for_iter! { (src, succ) in graph.iter() =>
diff --git a/src/bin/ascii_convert.rs b/src/bin/ascii_convert.rs
index eaa88414..761737d8 100644
--- a/src/bin/ascii_convert.rs
+++ b/src/bin/ascii_convert.rs
@@ -6,7 +6,7 @@
 
 use anyhow::Result;
 use clap::Parser;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use lender::*;
 use webgraph::traits::SequentialGraph;
 
@@ -27,9 +27,9 @@ pub fn main() -> Result<()> {
         .unwrap();
 
     let seq_graph = webgraph::graph::bvgraph::load_seq(&args.basename)?;
-    let mut pr = ProgressLogger::default().display_memory();
-    pr.item_name = "offset";
-    pr.start("Computing offsets...");
+    let mut pl = ProgressLogger::default();
+    pl.display_memory(true).item_name("offset");
+    pl.start("Computing offsets...");
 
     let mut iter = seq_graph.iter();
     while let Some((node_id, successors)) = iter.next() {
@@ -43,7 +43,7 @@ pub fn main() -> Result<()> {
         );
     }
 
-    pr.done();
+    pl.done();
 
     Ok(())
 }
diff --git a/src/bin/build_eliasfano.rs b/src/bin/build_eliasfano.rs
index 0d2afeb1..6836920d 100644
--- a/src/bin/build_eliasfano.rs
+++ b/src/bin/build_eliasfano.rs
@@ -7,7 +7,7 @@
 use anyhow::{Context, Result};
 use clap::Parser;
 use dsi_bitstream::prelude::*;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use epserde::prelude::*;
 use log::info;
 use std::fs::File;
@@ -51,9 +51,10 @@ pub fn main() -> Result<()> {
 
     let of_file_str = format!("{}.offsets", args.basename);
     let of_file_path = std::path::Path::new(&of_file_str);
-    let mut pr = ProgressLogger::default().display_memory();
-    pr.expected_updates = Some(num_nodes);
-    pr.item_name = "offset";
+    let mut pl = ProgressLogger::default();
+    pl.display_memory(true)
+        .item_name("offset")
+        .expected_updates(Some(num_nodes));
 
     // if the offset files exists, read it to build elias-fano
     if of_file_path.exists() {
@@ -62,7 +63,7 @@ pub fn main() -> Result<()> {
         // create a bit reader on the file
         let mut reader = BufBitReader::::new(>::new(of_file));
         // progress bar
-        pr.start("Translating offsets to EliasFano...");
+        pl.start("Translating offsets to EliasFano...");
         // read the graph a write the offsets
         let mut offset = 0;
         for _node_id in 0..num_nodes + 1 {
@@ -70,7 +71,7 @@ pub fn main() -> Result<()> {
             offset += reader.read_gamma()?;
             efb.push(offset as _)?;
             // decode the next nodes so we know where the next node_id starts
-            pr.light_update();
+            pl.light_update();
         }
     } else {
         info!("The offsets file does not exists, reading the graph to build Elias-Fano");
@@ -78,32 +79,34 @@ pub fn main() -> Result<()> {
         let seq_graph = seq_graph.map_codes_reader_builder(DynamicCodesReaderSkipperBuilder::from);
         // otherwise directly read the graph
         // progress bar
-        pr.start("Building EliasFano...");
+        pl.start("Building EliasFano...");
         // read the graph a write the offsets
         let mut iter = seq_graph.iter_degrees();
         for (new_offset, _node_id, _degree) in iter.by_ref() {
             // write where
             efb.push(new_offset as _)?;
             // decode the next nodes so we know where the next node_id starts
-            pr.light_update();
+            pl.light_update();
         }
         efb.push(iter.get_pos() as _)?;
     }
-    pr.done();
+    pl.done();
 
     let ef = efb.build();
 
-    let mut pr = ProgressLogger::default().display_memory();
-    pr.start("Building the Index over the ones in the high-bits...");
+    let mut pl = ProgressLogger::default();
+    pl.display_memory(true);
+    pl.start("Building the Index over the ones in the high-bits...");
     let ef: webgraph::EF<_> = ef.convert_to().unwrap();
-    pr.done();
+    pl.done();
 
-    let mut pr = ProgressLogger::default().display_memory();
-    pr.start("Writing to disk...");
+    let mut pl = ProgressLogger::default();
+    pl.display_memory(true);
+    pl.start("Writing to disk...");
     // serialize and dump the schema to disk
     let schema = ef.serialize_with_schema(&mut ef_file)?;
     std::fs::write(format!("{}.ef.schema", args.basename), schema.to_csv())?;
-    pr.done();
+    pl.done();
 
     Ok(())
 }
diff --git a/src/bin/build_offsets.rs b/src/bin/build_offsets.rs
index 22ab9972..0595c141 100644
--- a/src/bin/build_offsets.rs
+++ b/src/bin/build_offsets.rs
@@ -7,7 +7,7 @@
 use anyhow::Result;
 use clap::Parser;
 use dsi_bitstream::prelude::*;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use std::io::BufWriter;
 use webgraph::prelude::*;
 
@@ -37,10 +37,11 @@ pub fn main() -> Result<()> {
         BufWriter::with_capacity(1 << 20, file),
     ));
     // progress bar
-    let mut pr = ProgressLogger::default().display_memory();
-    pr.item_name = "offset";
-    pr.expected_updates = Some(seq_graph.num_nodes());
-    pr.start("Computing offsets...");
+    let mut pl = ProgressLogger::default();
+    pl.display_memory(true)
+        .item_name("offset")
+        .expected_updates(Some(seq_graph.num_nodes()));
+    pl.start("Computing offsets...");
     // read the graph a write the offsets
     let mut offset = 0;
     let mut degs_iter = seq_graph.iter_degrees();
@@ -49,11 +50,11 @@ pub fn main() -> Result<()> {
         writer.write_gamma((new_offset - offset) as _)?;
         offset = new_offset;
         // decode the next nodes so we know where the next node_id starts
-        pr.light_update();
+        pl.light_update();
     }
     // write the last offset, this is done to avoid decoding the last node
     writer.write_gamma((degs_iter.get_pos() - offset) as _)?;
-    pr.light_update();
-    pr.done();
+    pl.light_update();
+    pl.done();
     Ok(())
 }
diff --git a/src/bin/optimize_codes.rs b/src/bin/optimize_codes.rs
index 4970724e..60c91ff2 100644
--- a/src/bin/optimize_codes.rs
+++ b/src/bin/optimize_codes.rs
@@ -6,7 +6,7 @@
 
 use anyhow::Result;
 use clap::Parser;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use lender::*;
 use std::sync::atomic::Ordering;
 use webgraph::prelude::*;
@@ -30,17 +30,18 @@ pub fn main() -> Result<()> {
 
     let seq_graph = webgraph::graph::bvgraph::load_seq(&args.basename)?;
     let seq_graph = seq_graph.map_codes_reader_builder(CodesReaderStatsBuilder::new);
-    let mut pr = ProgressLogger::default().display_memory();
-    pr.item_name = "node";
-    pr.start("Reading nodes...");
-    pr.expected_updates = Some(seq_graph.num_nodes());
+    let mut pl = ProgressLogger::default();
+    pl.display_memory(true)
+        .item_name("node")
+        .expected_updates(Some(seq_graph.num_nodes()));
+    pl.start("Reading nodes...");
 
     let mut iter = seq_graph.iter();
     while iter.next().is_some() {
-        pr.light_update();
+        pl.light_update();
     }
 
-    pr.done();
+    pl.done();
 
     let reader = seq_graph.unwrap_codes_reader_builder();
     let stats = reader.stats;
diff --git a/src/bin/perm.rs b/src/bin/perm.rs
index 479ea52a..3638f5ca 100644
--- a/src/bin/perm.rs
+++ b/src/bin/perm.rs
@@ -7,7 +7,7 @@
 
 use anyhow::Result;
 use clap::Parser;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use epserde::prelude::*;
 use lender::*;
 use std::io::{BufReader, Read};
@@ -54,8 +54,8 @@ fn permute(
     perm: &[usize],
     num_nodes: usize,
 ) -> Result<()> {
-    let mut glob_pr = ProgressLogger::default().display_memory();
-    glob_pr.item_name = "node";
+    let mut glob_pl = ProgressLogger::default();
+    glob_pl.display_memory(true).item_name("node");
 
     let tmpdir = tempdir().unwrap();
     // create a stream where to dump the sorted pairs
@@ -98,9 +98,9 @@ pub fn main() -> Result<()> {
         .init()
         .unwrap();
 
-    let mut glob_pr = ProgressLogger::default().display_memory();
-    glob_pr.item_name = "node";
-    glob_pr.start("Permuting the graph...");
+    let mut glob_pl = ProgressLogger::default();
+    glob_pl.display_memory(true).item_name("node");
+    glob_pl.start("Permuting the graph...");
 
     // TODO!: check that batchsize fits in memory, and that print the maximum
     // batch_size usable
@@ -117,17 +117,17 @@ pub fn main() -> Result<()> {
 
         let mut perm = Vec::with_capacity(num_nodes);
         let mut buf = [0; core::mem::size_of::()];
-        let mut perm_pr = ProgressLogger::default().display_memory();
-        perm_pr.item_name = "node";
+        let mut perm_pl = ProgressLogger::default();
+        perm_pl.display_memory(true).item_name("node");
 
         for _ in 0..num_nodes {
            file.read_exact(&mut buf)?;
            perm.push(usize::from_be_bytes(buf));
-            perm_pr.light_update();
+            perm_pl.light_update();
         }
-        perm_pr.done();
+        perm_pl.done();
         permute(args, &graph, perm.as_ref(), num_nodes)?;
     }
-    glob_pr.done();
+    glob_pl.done();
     Ok(())
 }
diff --git a/src/bin/test_eliasfano.rs b/src/bin/test_eliasfano.rs
index 0bb9387e..008fcc54 100644
--- a/src/bin/test_eliasfano.rs
+++ b/src/bin/test_eliasfano.rs
@@ -7,7 +7,7 @@
 use anyhow::{Context, Result};
 use clap::Parser;
 use dsi_bitstream::prelude::*;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use epserde::prelude::*;
 use log::info;
 use std::fs::File;
@@ -46,9 +46,10 @@ pub fn main() -> Result<()> {
 
     let ef = >>::mmap(format!("{}.ef", args.basename), Flags::default())?;
 
-    let mut pr = ProgressLogger::default().display_memory();
-    pr.expected_updates = Some(num_nodes as _);
-    pr.item_name = "offset";
+    let mut pl = ProgressLogger::default();
+    pl.display_memory(true)
+        .item_name("offset")
+        .expected_updates(Some(num_nodes));
 
     // if the offset files exists, read it to build elias-fano
     if of_file_path.exists() {
@@ -57,7 +58,7 @@ pub fn main() -> Result<()> {
         // create a bit reader on the file
         let mut reader = BufBitReader::::new(>::new(of_file));
         // progress bar
-        pr.start("Translating offsets to EliasFano...");
+        pl.start("Translating offsets to EliasFano...");
         // read the graph a write the offsets
         let mut offset = 0;
         for node_id in 0..num_nodes + 1 {
@@ -67,28 +68,29 @@ pub fn main() -> Result<()> {
             let ef_res = ef.get(node_id as _);
             assert_eq!(offset, ef_res as _, "node_id: {}", node_id);
             // decode the next nodes so we know where the next node_id starts
-            pr.light_update();
+            pl.light_update();
         }
     }
 
-    let mut pr = ProgressLogger::default().display_memory();
-    pr.expected_updates = Some(num_nodes as _);
-    pr.item_name = "offset";
+    let mut pl = ProgressLogger::default();
+    pl.display_memory(true)
+        .item_name("offset")
+        .expected_updates(Some(num_nodes));
     info!("The offsets file does not exists, reading the graph to build Elias-Fano");
     let seq_graph = webgraph::graph::bvgraph::load_seq(&args.basename)?;
     let seq_graph = seq_graph.map_codes_reader_builder(DynamicCodesReaderSkipperBuilder::from);
     // otherwise directly read the graph
     // progress bar
-    pr.start("Building EliasFano...");
+    pl.start("Building EliasFano...");
     // read the graph a write the offsets
     for (new_offset, node_id, _degree) in seq_graph.iter_degrees() {
         // decode the next nodes so we know where the next node_id starts
         // read ef
         let ef_res = ef.get(node_id as _);
         assert_eq!(new_offset, ef_res as _, "node_id: {}", node_id);
-        pr.light_update();
+        pl.light_update();
     }
-    pr.done();
+    pl.done();
 
     Ok(())
 }
diff --git a/src/graph/bvgraph/bvgraph_writer_par.rs b/src/graph/bvgraph/bvgraph_writer_par.rs
index 85c2f404..0380791f 100644
--- a/src/graph/bvgraph/bvgraph_writer_par.rs
+++ b/src/graph/bvgraph/bvgraph_writer_par.rs
@@ -8,7 +8,7 @@
 use crate::prelude::*;
 use anyhow::Result;
 use dsi_bitstream::prelude::*;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use lender::*;
 use std::fs::File;
 use std::io::{BufReader, BufWriter};
@@ -52,10 +52,11 @@ pub fn compress_sequential_iter<
     );
 
     let num_nodes = iter.len();
-    let mut pr = ProgressLogger::default().display_memory();
-    pr.item_name = "node";
-    pr.expected_updates = Some(num_nodes);
-    pr.start("Compressing successors...");
+    let mut pl = ProgressLogger::default();
+    pl.display_memory(true)
+        .item_name("node")
+        .expected_updates(Some(num_nodes));
+    pl.start("Compressing successors...");
 
     let mut result = 0;
     if build_offsets {
@@ -70,15 +71,15 @@ pub fn compress_sequential_iter<
             let delta = bvcomp.push(successors)?;
             result += delta;
             writer.write_gamma(delta as u64)?;
-            pr.update();
+            pl.update();
         }
     } else {
         for (_node_id, successors) in iter {
            result += bvcomp.push(successors)?;
-            pr.update();
+            pl.update();
         }
     }
-    pr.done();
+    pl.done();
 
     log::info!("Writing the .properties file");
     let properties = compression_flags.to_properties(num_nodes, bvcomp.arcs);
diff --git a/src/traits/graph.rs b/src/traits/graph.rs
index e6380089..92c2ae59 100644
--- a/src/traits/graph.rs
+++ b/src/traits/graph.rs
@@ -16,7 +16,7 @@ use core::{
     ops::Range,
     sync::atomic::{AtomicUsize, Ordering},
 };
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use lender::*;
 use std::sync::Mutex;
 
@@ -72,14 +72,14 @@ pub trait SequentialGraph {
         reduce: R,
         thread_pool: &rayon::ThreadPool,
         granularity: usize,
-        pr: Option<&mut ProgressLogger>,
+        pl: Option<&mut ProgressLogger>,
     ) -> T
     where
         F: Fn(Range) -> T + Send + Sync,
         R: Fn(T, T) -> T + Send + Sync,
         T: Send + Default,
     {
-        let pr_lock = pr.map(Mutex::new);
+        let pl_lock = pl.map(Mutex::new);
         let num_nodes = self.num_nodes();
         let num_cpus = thread_pool
             .current_num_threads()
@@ -95,7 +95,7 @@ pub trait SequentialGraph {
             res.push(rx);
 
             // create some references so that we can share them across threads
-            let pr_lock_ref = &pr_lock;
+            let pl_lock_ref = &pl_lock;
             let next_node_ref = &next_node;
             let func_ref = &func;
             let reduce_ref = &reduce;
@@ -113,8 +113,8 @@ pub trait SequentialGraph {
                     // apply the function and reduce the result
                     result = reduce_ref(result, func_ref(start_pos..end_pos));
                     // update the progress logger if specified
-                    if let Some(pr_lock) = pr_lock_ref {
-                        pr_lock
+                    if let Some(pl_lock) = pl_lock_ref {
+                        pl_lock
                             .lock()
                             .unwrap()
                             .update_with_count((start_pos..end_pos).len());
diff --git a/tests/test_bvcomp.rs b/tests/test_bvcomp.rs
index f2d2c185..a213fb1f 100644
--- a/tests/test_bvcomp.rs
+++ b/tests/test_bvcomp.rs
@@ -20,7 +20,7 @@ use dsi_bitstream::{
     },
     traits::BE,
 };
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use webgraph::{
     graph::bvgraph::{
         BVComp, CompFlags, DynamicCodesReader, DynamicCodesWriter, WebgraphSequentialIter,
     },
@@ -76,10 +76,12 @@ fn test_bvcomp_slow() -> Result<()> {
         0,
     );
 
-    let mut pl = ProgressLogger::default().display_memory();
-    pl.item_name = "node";
+    let mut pl = ProgressLogger::default();
+    pl.display_memory(true)
+        .item_name("node")
+        .expected_updates(Some(NODES));
+
     pl.start("Compressing...");
-    pl.expected_updates = Some(NODES);
 
     let mut iter_nodes = seq_graph.iter();
     while let Some((_, iter)) = iter_nodes.next() {
diff --git a/tests/test_par_bvcomp.rs b/tests/test_par_bvcomp.rs
index 926463f5..f4c2de60 100644
--- a/tests/test_par_bvcomp.rs
+++ b/tests/test_par_bvcomp.rs
@@ -6,7 +6,7 @@
  */
 
 use anyhow::Result;
-use dsi_progress_logger::ProgressLogger;
+use dsi_progress_logger::*;
 use lender::*;
 use webgraph::prelude::*;
 
@@ -41,10 +41,11 @@ fn test_par_bvcomp() -> Result<()> {
 
     let comp_graph = webgraph::graph::bvgraph::load_seq(tmp_basename)?;
     let mut iter = comp_graph.iter();
-    let mut pr = ProgressLogger::default().display_memory();
-    pr.item_name = "node";
+    let mut pr = ProgressLogger::default();
+    pr.display_memory(true)
+        .item_name("node")
+        .expected_updates(Some(graph.num_nodes()));
    pr.start("Checking that the newly compressed graph is equivalent to the original one...");
-    pr.expected_updates = Some(graph.num_nodes());
 
     let mut iter_nodes = graph.iter();
     while let Some((node, succ_iter)) = iter_nodes.next() {
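
For reference, a minimal sketch of the setter-style `dsi-progress-logger` 0.2 setup that this patch converts to, using only the calls that appear in the diff; the function name and the loop body below are illustrative, not code from the repository:

```rust
use dsi_progress_logger::*;

// Hypothetical helper, shown only to illustrate the 0.2-style API used above.
fn visit_nodes(num_nodes: usize) {
    // In 0.2, configuration goes through chainable setters instead of public fields.
    let mut pl = ProgressLogger::default();
    pl.display_memory(true)
        .item_name("node")
        .local_speed(true)
        .expected_updates(Some(num_nodes));

    pl.start("Visiting graph...");
    for _node in 0..num_nodes {
        // ...per-node work would go here...
        pl.light_update();
    }
    pl.done();
}
```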