diff --git a/.cargo/audit.toml b/.cargo/audit.toml index aa1db3e6..0b118933 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -3,8 +3,4 @@ ignore = [ # FIXME!: See https://github.com/RustCrypto/RSA/issues/19#issuecomment-1822995643. # There is no workaround available yet. "RUSTSEC-2023-0071", - # FIXME: backoff => used in backend, need to be replaced with backon - "RUSTSEC-2024-0384", - # FIXME: derivative => used for default impls - "RUSTSEC-2024-0388", ] diff --git a/.github/workflows/careful.yml b/.github/workflows/careful.yml index 472bfd64..10458965 100644 --- a/.github/workflows/careful.yml +++ b/.github/workflows/careful.yml @@ -37,7 +37,7 @@ jobs: with: toolchain: ${{ matrix.rust }} - name: install cargo-careful - uses: taiki-e/install-action@278aeeb6e331c1bd610bffe45862e09452854b1a # v2 + uses: taiki-e/install-action@5d427d86f088a6cedcddb92b3ad038d30721b52f # v2 with: tool: cargo-careful - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2 diff --git a/.github/workflows/ci-heavy.yml b/.github/workflows/ci-heavy.yml index d3260452..63a003d9 100644 --- a/.github/workflows/ci-heavy.yml +++ b/.github/workflows/ci-heavy.yml @@ -151,7 +151,7 @@ jobs: with: toolchain: ${{ matrix.rust }} - name: install cargo-hack - uses: taiki-e/install-action@278aeeb6e331c1bd610bffe45862e09452854b1a # v2 + uses: taiki-e/install-action@5d427d86f088a6cedcddb92b3ad038d30721b52f # v2 with: tool: cargo-hack - uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2 @@ -253,7 +253,7 @@ jobs: steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install cargo-hack - uses: taiki-e/install-action@278aeeb6e331c1bd610bffe45862e09452854b1a # v2 + uses: taiki-e/install-action@5d427d86f088a6cedcddb92b3ad038d30721b52f # v2 with: tool: cargo-hack diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index fb06abb6..9dd9d393 100644 --- a/.github/workflows/coverage.yaml +++ 
b/.github/workflows/coverage.yaml @@ -19,8 +19,8 @@ jobs: - name: Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - - name: Install cargo-hack - uses: taiki-e/install-action@278aeeb6e331c1bd610bffe45862e09452854b1a # v2 + - name: Install cargo-tarpaulin + uses: taiki-e/install-action@5d427d86f088a6cedcddb92b3ad038d30721b52f # v2 with: tool: cargo-tarpaulin @@ -29,11 +29,13 @@ jobs: # This is because we have a workspace with multiple crates, and we want to generate coverage for all of them, but we only want to # report the coverage of rustic_backend and rustic_core crates (currently) as this is where the main logic is - name: Generate code coverage + env: + RUST_BACKTRACE: "0" run: | cargo tarpaulin --verbose --all-features --workspace --timeout 120 --out xml - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4 + uses: codecov/codecov-action@5c47607acb93fed5485fdbf7232e8a31425f672a # v5 with: - token: ${{ secrets.CODECOV_TOKEN }} - slug: rustic-rs/rustic_core \ No newline at end of file + token: ${{ secrets.CODECOV_TOKEN }} + slug: rustic-rs/rustic_core \ No newline at end of file diff --git a/.github/workflows/release-plz.yml b/.github/workflows/release-plz.yml index 43e1a927..fcd7e665 100644 --- a/.github/workflows/release-plz.yml +++ b/.github/workflows/release-plz.yml @@ -30,7 +30,7 @@ jobs: uses: dtolnay/rust-toolchain@stable - name: Run release-plz - uses: MarcoIeni/release-plz-action@0a28fd4d52f6b5e06e0cf8e75a22e96e1966fc2f # v0.5 + uses: MarcoIeni/release-plz-action@f0fdffff1ff475195a1638e487cf93719e065a8e # v0.5 env: GITHUB_TOKEN: ${{ steps.generate-token.outputs.token }} CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} \ No newline at end of file diff --git a/.justfile b/.justfile new file mode 100644 index 00000000..5a510ec7 --- /dev/null +++ b/.justfile @@ -0,0 +1,79 @@ +# 'Just' Configuration +# Loads .env file for variables to be used in +# in this 
just file + +set dotenv-load := true + +# Ignore recipes that are commented out + +set ignore-comments := true + +# Set shell for Windows OSs: +# If you have PowerShell Core installed and want to use it, +# use `pwsh.exe` instead of `powershell.exe` + +set windows-shell := ["powershell.exe", "-NoLogo", "-Command"] + +# Set shell for non-Windows OSs: + +set shell := ["bash", "-uc"] + +export RUST_BACKTRACE := "1" +export RUST_LOG := "info" +export CI := "1" + +build: + cargo build --all-features + cargo build -r --all-features + +b: build + +check: + cargo check --no-default-features --all-targets --workspace + cargo check --all-features --all-targets --workspace + +c: check + +ci: + just loop . dev + +dev: format lint test + +d: dev + +doc: + cargo +stable doc --no-deps --all-features --workspace --examples + +format-dprint: + dprint fmt + +format-cargo: + cargo fmt --all + +format: format-cargo format-dprint + +fmt: format + +rev: + cargo insta review + +inverse-deps crate: + cargo tree -e features -i {{ crate }} + +lint: check + cargo clippy --no-default-features -- -D warnings + cargo clippy --all-targets --all-features -- -D warnings + +loop dir action: + watchexec -w {{ dir }} -- "just {{ action }}" + +test: check lint + cargo test --all-targets --all-features --workspace --examples + +test-ignored: check lint + cargo test --all-targets --all-features --workspace --examples -- --ignored + +t: test test-ignored + +coverage $RUST_BACKTRACE="0": + cargo tarpaulin --verbose --all-features --workspace --timeout 120 --out Lcov --output-dir coverage diff --git a/Cargo.lock b/Cargo.lock index e0e9370f..50abfeb7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -349,17 +349,6 @@ dependencies = [ "paste", ] -[[package]] -name = "backoff" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" -dependencies = [ - "getrandom", - "instant", - "rand", -] - [[package]] name 
= "backon" version = "1.2.0" @@ -1107,17 +1096,6 @@ dependencies = [ "serde", ] -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "derive_destructure2" version = "0.1.3" @@ -2083,15 +2061,6 @@ dependencies = [ "similar", ] -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if", -] - [[package]] name = "integer-sqrt" version = "0.1.5" @@ -3580,12 +3549,12 @@ dependencies = [ [[package]] name = "rustic_backend" -version = "0.4.2" +version = "0.5.0" dependencies = [ "aho-corasick", "anyhow", "aws-lc-sys", - "backoff", + "backon", "bytes", "bytesize", "clap", @@ -3626,7 +3595,7 @@ version = "0.2.2" [[package]] name = "rustic_core" -version = "0.5.5" +version = "0.6.1" dependencies = [ "aes256ctr_poly1305aes", "anyhow", @@ -3640,7 +3609,6 @@ dependencies = [ "conflate", "crossbeam-channel", "dav-server", - "derivative", "derive_more", "derive_setters", "dirs", @@ -3701,7 +3669,7 @@ dependencies = [ [[package]] name = "rustic_testing" -version = "0.2.3" +version = "0.3.0" dependencies = [ "aho-corasick", "anyhow", diff --git a/crates/backend/CHANGELOG.md b/crates/backend/CHANGELOG.md index 7e0b6aee..d45614e7 100644 --- a/crates/backend/CHANGELOG.md +++ b/crates/backend/CHANGELOG.md @@ -2,6 +2,23 @@ All notable changes to this project will be documented in this file. 
+## [0.5.0](https://github.com/rustic-rs/rustic_core/compare/rustic_backend-v0.4.2...rustic_backend-v0.5.0) - 2024-11-18 + +### Added + +- *(async)* add `async_compatible` methods to identify backend compatibility ([#355](https://github.com/rustic-rs/rustic_core/pull/355)) +- add 'yandex-disk' to enabled opendal services and update opendal to 0.50.2 ([#360](https://github.com/rustic-rs/rustic_core/pull/360)) + +### Other + +- *(error)* enhance error logging and output formatting ([#361](https://github.com/rustic-rs/rustic_core/pull/361)) +- *(backend)* simplify code in local backend ([#362](https://github.com/rustic-rs/rustic_core/pull/362)) +- *(backend)* migrate from `backoff` to `backon` ([#356](https://github.com/rustic-rs/rustic_core/pull/356)) +- *(error)* improve error messages and file handling ([#334](https://github.com/rustic-rs/rustic_core/pull/334)) +- *(deps)* lock file maintenance rust dependencies ([#345](https://github.com/rustic-rs/rustic_core/pull/345)) +- *(deps)* [**breaking**] upgrade to new conflate version ([#300](https://github.com/rustic-rs/rustic_core/pull/300)) +- *(errors)* [**breaking**] Improve error handling, display and clean up codebase ([#321](https://github.com/rustic-rs/rustic_core/pull/321)) + ## [0.4.2](https://github.com/rustic-rs/rustic_core/compare/rustic_backend-v0.4.1...rustic_backend-v0.4.2) - 2024-10-24 ### Fixed diff --git a/crates/backend/Cargo.toml b/crates/backend/Cargo.toml index 335c9d27..c1c52d82 100644 --- a/crates/backend/Cargo.toml +++ b/crates/backend/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rustic_backend" -version = "0.4.2" +version = "0.5.0" authors = ["the rustic-rs team"] categories = ["data-structures", "filesystem"] documentation = "https://docs.rs/rustic_backend" @@ -42,7 +42,7 @@ opendal = [ "tokio/rt-multi-thread", "dep:typed-path", ] -rest = ["dep:reqwest", "dep:backoff"] +rest = ["dep:reqwest", "dep:backon"] rclone = ["rest", "dep:rand", "dep:semver"] [dependencies] @@ -78,7 +78,7 @@ 
aho-corasick = { workspace = true } walkdir = "2.5.0" # rest backend -backoff = { version = "0.4.0", optional = true } +backon = { version = "1.2.0", optional = true } reqwest = { version = "0.12.8", default-features = false, features = ["json", "rustls-tls-native-roots", "stream", "blocking"], optional = true } # rclone backend @@ -97,11 +97,11 @@ features = ["prebuilt-nasm"] [target.'cfg(not(windows))'.dependencies] # opendal backend - sftp is not supported on windows, see https://github.com/apache/incubator-opendal/issues/2963 -opendal = { version = "0.50.0", features = ["services-b2", "services-sftp", "services-swift", "services-azblob", "services-azdls", "services-cos", "services-fs", "services-ftp", "services-dropbox", "services-gdrive", "services-gcs", "services-ghac", "services-http", "services-ipmfs", "services-memory", "services-obs", "services-onedrive", "services-oss", "services-s3", "services-webdav", "services-webhdfs", "services-azfile", "layers-blocking", "layers-throttle"], optional = true } +opendal = { version = "0.50.2", features = ["services-b2", "services-sftp", "services-swift", "services-azblob", "services-azdls", "services-cos", "services-fs", "services-ftp", "services-dropbox", "services-gdrive", "services-gcs", "services-ghac", "services-http", "services-ipmfs", "services-memory", "services-obs", "services-onedrive", "services-oss", "services-s3", "services-webdav", "services-webhdfs", "services-azfile", "layers-blocking", "layers-throttle", "services-yandex-disk"], optional = true } [target.'cfg(windows)'.dependencies] # opendal backend -opendal = { version = "0.50.0", features = ["services-b2", "services-swift", "services-azblob", "services-azdls", "services-cos", "services-fs", "services-ftp", "services-dropbox", "services-gdrive", "services-gcs", "services-ghac", "services-http", "services-ipmfs", "services-memory", "services-obs", "services-onedrive", "services-oss", "services-s3", "services-webdav", "services-webhdfs", 
"services-azfile", "layers-blocking", "layers-throttle"], optional = true } +opendal = { version = "0.50.2", features = ["services-b2", "services-swift", "services-azblob", "services-azdls", "services-cos", "services-fs", "services-ftp", "services-dropbox", "services-gdrive", "services-gcs", "services-ghac", "services-http", "services-ipmfs", "services-memory", "services-obs", "services-onedrive", "services-oss", "services-s3", "services-webdav", "services-webhdfs", "services-azfile", "layers-blocking", "layers-throttle", "services-yandex-disk"], optional = true } [dev-dependencies] anyhow = { workspace = true } diff --git a/crates/backend/src/local.rs b/crates/backend/src/local.rs index e20f27cc..a60e2d04 100644 --- a/crates/backend/src/local.rs +++ b/crates/backend/src/local.rs @@ -70,6 +70,25 @@ impl LocalBackend { }) } + /// Base path of the given file type and id. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Returns + /// + /// The base path of the file. + fn base_path(&self, tpe: FileType, id: &Id) -> PathBuf { + let hex_id = id.to_hex(); + match tpe { + FileType::Config => self.path.clone(), + FileType::Pack => self.path.join("data").join(&hex_id[0..2]), + _ => self.path.join(tpe.dirname()), + } + } + /// Path to the given file type and id. /// /// If the file type is `FileType::Pack`, the id will be used to determine the subdirectory. @@ -86,8 +105,7 @@ impl LocalBackend { let hex_id = id.to_hex(); match tpe { FileType::Config => self.path.join("config"), - FileType::Pack => self.path.join("data").join(&hex_id[0..2]).join(hex_id), - _ => self.path.join(tpe.dirname()).join(hex_id), + _ => self.base_path(tpe, id).join(hex_id), } } @@ -169,21 +187,6 @@ impl LocalBackend { } Ok(()) } - - /// Returns the parent path of the given file type and id. - /// - /// # Arguments - /// - /// * `tpe` - The type of the file. - /// * `id` - The id of the file. 
- /// - /// # Returns - /// - /// The parent path of the file or `None` if the file does not have a parent. - fn parent_path(&self, tpe: FileType, id: &Id) -> Option { - let path = self.path(tpe, id); - path.parent().map(Path::to_path_buf) - } } impl ReadBackend for LocalBackend { @@ -312,7 +315,7 @@ impl ReadBackend for LocalBackend { }) .inspect(|r| { if let Err(err) = r { - error!("Error while listing files: {err:?}"); + error!("Error while listing files: {}", err.display_log()); } }) .filter_map(RusticResult::ok); @@ -414,6 +417,13 @@ impl ReadBackend for LocalBackend { Ok(vec.into()) } + + /// [`LocalBackend`] doesn't use `async`, even under the hood. + /// + /// So it can be called from `async` features. + fn is_async_compatible(&self) -> bool { + true + } } impl WriteBackend for LocalBackend { @@ -489,19 +499,10 @@ impl WriteBackend for LocalBackend { trace!("writing tpe: {:?}, id: {}", &tpe, &id); let filename = self.path(tpe, id); - let Some(parent) = self.parent_path(tpe, id) else { - return Err( - RusticError::new( - ErrorKind::Backend, - "The file `{path}` does not have a parent directory. This may be empty or a root path. Please check the file and try again.", - ) - .attach_context("path", filename.display().to_string()) - .ask_report() - ); - }; + let parent = self.base_path(tpe, id); // create parent directory if it does not exist - fs::create_dir_all(parent.clone()).map_err(|err| { + fs::create_dir_all(&parent).map_err(|err| { RusticError::with_source( ErrorKind::InputOutput, "Failed to create directories `{path}`. Does the directory already exist? 
Please check the file and try again.", @@ -563,7 +564,7 @@ impl WriteBackend for LocalBackend { if let Some(command) = &self.post_create_command { if let Err(err) = Self::call_command(tpe, id, &filename, command) { - warn!("post-create: {err}"); + warn!("post-create: {}", err.display_log()); } } Ok(()) @@ -593,7 +594,7 @@ impl WriteBackend for LocalBackend { )?; if let Some(command) = &self.post_delete_command { if let Err(err) = Self::call_command(tpe, id, &filename, command) { - warn!("post-delete: {err}"); + warn!("post-delete: {}", err.display_log()); } } Ok(()) diff --git a/crates/backend/src/opendal.rs b/crates/backend/src/opendal.rs index 0165d0e4..acfb7c92 100644 --- a/crates/backend/src/opendal.rs +++ b/crates/backend/src/opendal.rs @@ -328,7 +328,7 @@ impl ReadBackend for OpenDALBackend { }) .inspect(|r| { if let Err(err) = r { - error!("Error while listing files: {err:?}"); + error!("Error while listing files: {}", err.display_log()); } }) .filter_map(RusticResult::ok) @@ -386,6 +386,15 @@ impl ReadBackend for OpenDALBackend { )? .to_bytes()) } + + /// [`OpenDALBackend`] is `sync` and uses `block_on(async Fn)` under the hood. + /// + /// When implementing `rustic_core` using this backend in some `async` features will not work. + /// + /// see + fn is_async_compatible(&self) -> bool { + false + } } impl WriteBackend for OpenDALBackend { diff --git a/crates/backend/src/rclone.rs b/crates/backend/src/rclone.rs index 94bb4b3b..c8522900 100644 --- a/crates/backend/src/rclone.rs +++ b/crates/backend/src/rclone.rs @@ -353,6 +353,11 @@ impl ReadBackend for RcloneBackend { ) -> RusticResult { self.rest.read_partial(tpe, id, cacheable, offset, length) } + + /// [`RcloneBackend`] uses [`RestBackend`]. 
+ fn is_async_compatible(&self) -> bool { + self.rest.is_async_compatible() + } } impl WriteBackend for RcloneBackend { diff --git a/crates/backend/src/rest.rs b/crates/backend/src/rest.rs index 2293afa6..8a7e0223 100644 --- a/crates/backend/src/rest.rs +++ b/crates/backend/src/rest.rs @@ -1,11 +1,11 @@ use std::str::FromStr; use std::time::Duration; -use backoff::{backoff::Backoff, ExponentialBackoff, ExponentialBackoffBuilder}; +use backon::{BlockingRetryable, ExponentialBuilder}; use bytes::Bytes; use log::{trace, warn}; use reqwest::{ - blocking::{Client, ClientBuilder, Response}, + blocking::{Client, ClientBuilder}, header::{HeaderMap, HeaderValue}, Url, }; @@ -28,80 +28,12 @@ pub(super) mod constants { pub(super) const DEFAULT_TIMEOUT: Duration = Duration::from_secs(600); } -// trait CheckError to add user-defined method check_error on Response -pub(crate) trait CheckError { - /// Check reqwest Response for error and treat errors as permanent or transient - fn check_error(self) -> Result>; -} - -impl CheckError for Response { - /// Check reqwest Response for error and treat errors as permanent or transient - /// - /// # Errors - /// - /// If the response is an error, it will return an error of type Error - /// - /// # Returns - /// - /// The response if it is not an error - fn check_error(self) -> Result> { - match self.error_for_status() { - Ok(t) => Ok(t), - // Note: status() always give Some(_) as it is called from a Response - Err(err) if err.status().unwrap().is_client_error() => { - Err(backoff::Error::Permanent(err)) - } - Err(err) => Err(backoff::Error::Transient { - err, - retry_after: None, - }), - } - } -} - -/// A backoff implementation that limits the number of retries -#[derive(Clone, Debug)] -struct LimitRetryBackoff { - /// The maximum number of retries - max_retries: usize, - /// The current number of retries - retries: usize, - /// The exponential backoff - exp: ExponentialBackoff, -} - -impl Default for LimitRetryBackoff { - fn default() -> 
Self { - Self { - max_retries: constants::DEFAULT_RETRY, - retries: 0, - exp: ExponentialBackoffBuilder::new() - .with_max_elapsed_time(None) // no maximum elapsed time; we count number of retires - .build(), - } - } -} - -impl Backoff for LimitRetryBackoff { - /// Returns the next backoff duration. - /// - /// # Notes - /// - /// If the number of retries exceeds the maximum number of retries, it returns None. - fn next_backoff(&mut self) -> Option { - self.retries += 1; - if self.retries > self.max_retries { - None - } else { - self.exp.next_backoff() - } - } - - /// Resets the backoff to the initial state. - fn reset(&mut self) { - self.retries = 0; - self.exp.reset(); - } +fn construct_backoff_error(err: reqwest::Error) -> Box { + RusticError::with_source( + ErrorKind::Backend, + "Backoff failed, please check the logs for more information.", + err, + ) } /// A backend implementation that uses REST to access the backend. @@ -111,24 +43,37 @@ pub struct RestBackend { url: Url, /// The client to use. client: Client, - /// The backoff implementation to use. - backoff: LimitRetryBackoff, -} - -/// Notify function for backoff in case of error -/// -/// # Arguments -/// -/// * `err` - The error that occurred -/// * `duration` - The duration of the backoff -// We need to pass the error by value to satisfy the signature of the notify function -// for handling errors in backoff -#[allow(clippy::needless_pass_by_value)] -fn notify(err: reqwest::Error, duration: Duration) { - warn!("Error {err} at {duration:?}, retrying"); + /// The ``BackoffBuilder`` we use + backoff: ExponentialBuilder, } impl RestBackend { + /// Call the given operation retrying non-permanent errors and giving warnings for failed operations + /// + /// ## Permanent/non-permanent errors + /// + /// - `client_error` are considered permanent + /// - others are not, and are subject to retry + /// + /// ## Returns + /// + /// The operation result + /// or the last error (permanent or not) that occurred. 
+ fn retry_notify(&self, op: F) -> Result + where + F: FnMut() -> Result, + { + op.retry(self.backoff) + .when(|err| { + err.status().map_or( + true, // retry + |status_code| !status_code.is_client_error(), // do not retry if `is_client_error` + ) + }) + .notify(|err, duration| warn!("Error {err} at {duration:?}, retrying")) + .call() + } + /// Create a new [`RestBackend`] from a given url. /// /// # Arguments @@ -169,7 +114,12 @@ impl RestBackend { .map_err(|err| { RusticError::with_source(ErrorKind::Backend, "Failed to build HTTP client", err) })?; - let mut backoff = LimitRetryBackoff::default(); + + // backon doesn't allow us to specify `None` for `max_delay` + // see + let mut backoff = ExponentialBuilder::default() + .with_max_delay(Duration::MAX) // no maximum elapsed time; we count number of retries + .with_max_times(constants::DEFAULT_RETRY); // FIXME: If we have multiple times the same option, this could lead to unexpected behavior for (option, value) in options { @@ -187,7 +137,7 @@ impl RestBackend { .attach_context("option", "retry") })?, }; - backoff.max_retries = max_retries; + backoff = backoff.with_max_times(max_retries); } else if option == "timeout" { let timeout = humantime::Duration::from_str(&value).map_err(|err| { RusticError::with_source( @@ -299,37 +249,34 @@ impl ReadBackend for RestBackend { .attach_context("tpe_dir", tpe.dirname().to_string()) })?; - backoff::retry_notify( - self.backoff.clone(), - || { - if tpe == FileType::Config { - return Ok( - if self.client.head(url.clone()).send()?.status().is_success() { - vec![(Id::default(), 0)] - } else { - Vec::new() - }, - ); - } - - let list = self - .client - .get(url.clone()) - .header("Accept", "application/vnd.x.restic.rest.v2") - .send()? - .check_error()? - .json::>>()? 
// use Option to be handle null json value - .unwrap_or_default(); - Ok(list - .into_iter() - .filter_map(|i| match i.name.parse::() { - Ok(id) => Some((id, i.size)), - Err(_) => None, - }) - .collect()) - }, - notify, - ) + self.retry_notify(|| { + if tpe == FileType::Config { + return Ok( + if self.client.head(url.clone()).send()?.status().is_success() { + vec![(Id::default(), 0)] + } else { + Vec::new() + }, + ); + } + + let list = self + .client + .get(url.clone()) + .header("Accept", "application/vnd.x.restic.rest.v2") + .send()? + .error_for_status()? + .json::>>()? // use Option to be handle null json value + .unwrap_or_default(); + + Ok(list + .into_iter() + .filter_map(|i| match i.name.parse::() { + Ok(id) => Some((id, i.size)), + Err(_) => None, + }) + .collect()) + }) .map_err(construct_backoff_error) } @@ -351,18 +298,13 @@ impl ReadBackend for RestBackend { .url(tpe, id) .map_err(|err| construct_join_url_error(err, tpe, id, &self.url))?; - backoff::retry_notify( - self.backoff.clone(), - || { - Ok(self - .client - .get(url.clone()) - .send()? - .check_error()? - .bytes()?) - }, - notify, - ) + self.retry_notify(|| { + self.client + .get(url.clone()) + .send()? + .error_for_status()? + .bytes() + }) .map_err(construct_backoff_error) } @@ -398,29 +340,26 @@ impl ReadBackend for RestBackend { .attach_context("id", id.to_string()) })?; - backoff::retry_notify( - self.backoff.clone(), - || { - Ok(self - .client - .get(url.clone()) - .header("Range", header_value.clone()) - .send()? - .check_error()? - .bytes()?) - }, - notify, - ) + self.retry_notify(|| { + self.client + .get(url.clone()) + .header("Range", header_value.clone()) + .send()? + .error_for_status()? 
+ .bytes() + }) .map_err(construct_backoff_error) } -} -fn construct_backoff_error(err: backoff::Error) -> Box { - RusticError::with_source( - ErrorKind::Backend, - "Backoff failed, please check the logs for more information.", - err, - ) + /// [`RestBackend`] uses `reqwest` which blocking implementation + /// uses an `async` runtime under the hood. + /// + /// When implementing `rustic_core` using this backend in some `async` features will not work. + /// + /// see + fn is_async_compatible(&self) -> bool { + false + } } fn construct_join_url_error( @@ -453,14 +392,10 @@ impl WriteBackend for RestBackend { .attach_context("join_input", "?create=true") })?; - backoff::retry_notify( - self.backoff.clone(), - || { - _ = self.client.post(url.clone()).send()?.check_error()?; - Ok(()) - }, - notify, - ) + self.retry_notify(|| { + _ = self.client.post(url.clone()).send()?.error_for_status()?; + Ok(()) + }) .map_err(construct_backoff_error) } @@ -492,15 +427,15 @@ impl WriteBackend for RestBackend { ) .body(buf); - backoff::retry_notify( - self.backoff.clone(), - || { - // Note: try_clone() always gives Some(_) as the body is Bytes which is cloneable - _ = req_builder.try_clone().unwrap().send()?.check_error()?; - Ok(()) - }, - notify, - ) + self.retry_notify(|| { + // Note: try_clone() always gives Some(_) as the body is Bytes which is cloneable + _ = req_builder + .try_clone() + .unwrap() + .send()? 
+ .error_for_status()?; + Ok(()) + }) .map_err(construct_backoff_error) } @@ -521,14 +456,10 @@ impl WriteBackend for RestBackend { .url(tpe, id) .map_err(|err| construct_join_url_error(err, tpe, id, &self.url))?; - backoff::retry_notify( - self.backoff.clone(), - || { - _ = self.client.delete(url.clone()).send()?.check_error()?; - Ok(()) - }, - notify, - ) + self.retry_notify(|| { + _ = self.client.delete(url.clone()).send()?.error_for_status()?; + Ok(()) + }) .map_err(construct_backoff_error) } } diff --git a/crates/core/CHANGELOG.md b/crates/core/CHANGELOG.md index d3e91dbb..05cd0fbb 100644 --- a/crates/core/CHANGELOG.md +++ b/crates/core/CHANGELOG.md @@ -2,6 +2,36 @@ All notable changes to this project will be documented in this file. +## [0.6.1](https://github.com/rustic-rs/rustic_core/compare/rustic_core-v0.6.0...rustic_core-v0.6.1) - 2024-11-19 + +### Added + +- make FilePolicy usable in cli and config + +### Fixed + +- *(error)* add missing context to error in cache backend + +## [0.6.0](https://github.com/rustic-rs/rustic_core/compare/rustic_core-v0.5.5...rustic_core-v0.6.0) - 2024-11-18 + +### Added + +- *(async)* add `async_compatible` methods to identify backend compatibility ([#355](https://github.com/rustic-rs/rustic_core/pull/355)) + +### Fixed + +- prevent overwriting hot repository on init ([#353](https://github.com/rustic-rs/rustic_core/pull/353)) + +### Other + +- *(error)* enhance error logging and output formatting ([#361](https://github.com/rustic-rs/rustic_core/pull/361)) +- *(deps)* remove Derivative and replace with Default impl due to RUSTSEC-2024-0388 ([#359](https://github.com/rustic-rs/rustic_core/pull/359)) +- *(error)* improve error messages and file handling ([#334](https://github.com/rustic-rs/rustic_core/pull/334)) +- *(deps)* lock file maintenance rust dependencies ([#345](https://github.com/rustic-rs/rustic_core/pull/345)) +- *(deps)* remove cdc and switch to rustic_cdc ([#348](https://github.com/rustic-rs/rustic_core/pull/348)) 
+- *(deps)* [**breaking**] upgrade to new conflate version ([#300](https://github.com/rustic-rs/rustic_core/pull/300)) +- *(errors)* [**breaking**] Improve error handling, display and clean up codebase ([#321](https://github.com/rustic-rs/rustic_core/pull/321)) + ## [0.5.5](https://github.com/rustic-rs/rustic_core/compare/rustic_core-v0.5.4...rustic_core-v0.5.5) - 2024-10-25 ### Fixed diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index e7ca9064..645770ae 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rustic_core" -version = "0.5.5" +version = "0.6.1" authors = ["the rustic-rs team"] categories = ["data-structures", "encoding", "filesystem"] documentation = "https://docs.rs/rustic_core" @@ -45,7 +45,6 @@ displaydoc = { workspace = true } thiserror = { workspace = true } # macros -derivative = "2.2.0" derive_more = { version = "1.0.0", features = ["add", "constructor", "display", "from", "deref", "from_str"] } derive_setters = "0.1.6" diff --git a/crates/core/src/archiver.rs b/crates/core/src/archiver.rs index e9ab4ae3..fa2f6db6 100644 --- a/crates/core/src/archiver.rs +++ b/crates/core/src/archiver.rs @@ -144,15 +144,15 @@ impl<'a, BE: DecryptFullBackend, I: ReadGlobalIndex> Archiver<'a, BE, I> { match src.size() { Ok(Some(size)) => p.set_length(size), Ok(None) => {} - Err(err) => warn!("error determining backup size: {err}"), + Err(err) => warn!("error determining backup size: {}", err.display_log()), } } }); // filter out errors and handle as_path let iter = src.entries().filter_map(|item| match item { - Err(e) => { - warn!("ignoring error {e}\n"); + Err(err) => { + warn!("ignoring error: {}", err.display_log()); None } Ok(ReadSourceEntry { path, node, open }) => { @@ -197,7 +197,7 @@ impl<'a, BE: DecryptFullBackend, I: ReadGlobalIndex> Archiver<'a, BE, I> { .filter_map(|item| match item { Ok(item) => Some(item), Err(err) => { - warn!("ignoring error: {err:?}"); + warn!("ignoring error: {}", 
err.display_log()); None } }) diff --git a/crates/core/src/archiver/parent.rs b/crates/core/src/archiver/parent.rs index f807ce9d..f1eb176f 100644 --- a/crates/core/src/archiver/parent.rs +++ b/crates/core/src/archiver/parent.rs @@ -98,7 +98,10 @@ impl Parent { let tree = tree_id.and_then(|tree_id| match Tree::from_backend(be, index, tree_id) { Ok(tree) => Some(tree), Err(err) => { - warn!("ignoring error when loading parent tree {tree_id}: {err}"); + warn!( + "ignoring error when loading parent tree {tree_id}: {}", + err.display_log() + ); None } }); @@ -202,7 +205,10 @@ impl Parent { |tree_id| match Tree::from_backend(be, index, tree_id) { Ok(tree) => Some(tree), Err(err) => { - warn!("ignoring error when loading parent tree {tree_id}: {err}"); + warn!( + "ignoring error when loading parent tree {tree_id}: {}", + err.display_log() + ); None } }, @@ -274,8 +280,9 @@ impl Parent { ParentResult::Matched(()) } else { warn!( - "missing blobs in index for unchanged file {path:?}; re-reading file", - ); + "missing blobs in index for unchanged file {}; re-reading file", + path.display() + ); ParentResult::NotFound } } diff --git a/crates/core/src/backend.rs b/crates/core/src/backend.rs index 2179671b..25896f89 100644 --- a/crates/core/src/backend.rs +++ b/crates/core/src/backend.rs @@ -174,6 +174,21 @@ pub trait ReadBackend: Send + Sync + 'static { fn warm_up(&self, _tpe: FileType, _id: &Id) -> RusticResult<()> { Ok(()) } + + /// Getter to determine if some backend is compatible + /// with async features in `rustic_core` implementations. + /// + /// ## Default impl + /// + /// A default impl allow these change to be non-breaking. + /// By default it is `false`, async compatibility is opt-in. + /// + /// ## Temporary + /// + /// see + fn is_async_compatible(&self) -> bool { + false + } } /// Trait for Searching in a backend. @@ -343,7 +358,7 @@ pub trait WriteBackend: ReadBackend { mock! 
{ Backend {} - impl ReadBackend for Backend{ + impl ReadBackend for Backend { fn location(&self) -> String; fn list_with_size(&self, tpe: FileType) -> RusticResult>; fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult; @@ -355,6 +370,7 @@ mock! { offset: u32, length: u32, ) -> RusticResult; + fn is_async_compatible(&self) -> bool; } impl WriteBackend for Backend { @@ -400,6 +416,9 @@ impl ReadBackend for Arc { self.deref() .read_partial(tpe, id, cacheable, offset, length) } + fn is_async_compatible(&self) -> bool { + self.deref().is_async_compatible() + } } impl std::fmt::Debug for dyn WriteBackend { diff --git a/crates/core/src/backend/cache.rs b/crates/core/src/backend/cache.rs index 8d82ecb0..99822388 100644 --- a/crates/core/src/backend/cache.rs +++ b/crates/core/src/backend/cache.rs @@ -69,7 +69,10 @@ impl ReadBackend for CachedBackend { if tpe.is_cacheable() { if let Err(err) = self.cache.remove_not_in_list(tpe, &list) { - warn!("Error in cache backend removing files {tpe:?}: {err}"); + warn!( + "Error in cache backend removing files {tpe:?}: {}", + err.display_log() + ); } } @@ -95,12 +98,18 @@ impl ReadBackend for CachedBackend { match self.cache.read_full(tpe, id) { Ok(Some(data)) => return Ok(data), Ok(None) => {} - Err(err) => warn!("Error in cache backend reading {tpe:?},{id}: {err}"), + Err(err) => warn!( + "Error in cache backend reading {tpe:?},{id}: {}", + err.display_log() + ), } let res = self.be.read_full(tpe, id); if let Ok(data) = &res { if let Err(err) = self.cache.write_bytes(tpe, id, data) { - warn!("Error in cache backend writing {tpe:?},{id}: {err}"); + warn!( + "Error in cache backend writing {tpe:?},{id}: {}", + err.display_log() + ); } } res @@ -138,14 +147,20 @@ impl ReadBackend for CachedBackend { match self.cache.read_partial(tpe, id, offset, length) { Ok(Some(data)) => return Ok(data), Ok(None) => {} - Err(err) => warn!("Error in cache backend reading {tpe:?},{id}: {err}"), + Err(err) => warn!( + "Error in cache backend 
reading {tpe:?},{id}: {}", + err.display_log() + ), }; // read full file, save to cache and return partial content match self.be.read_full(tpe, id) { Ok(data) => { let range = offset as usize..(offset + length) as usize; if let Err(err) = self.cache.write_bytes(tpe, id, &data) { - warn!("Error in cache backend writing {tpe:?},{id}: {err}"); + warn!( + "Error in cache backend writing {tpe:?},{id}: {}", + err.display_log() + ); } Ok(Bytes::copy_from_slice(&data.slice(range))) } @@ -162,6 +177,10 @@ impl ReadBackend for CachedBackend { fn warm_up(&self, tpe: FileType, id: &Id) -> RusticResult<()> { self.be.warm_up(tpe, id) } + + fn is_async_compatible(&self) -> bool { + self.be.is_async_compatible() + } } impl WriteBackend for CachedBackend { @@ -183,7 +202,10 @@ impl WriteBackend for CachedBackend { fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()> { if cacheable || tpe.is_cacheable() { if let Err(err) = self.cache.write_bytes(tpe, id, &buf) { - warn!("Error in cache backend writing {tpe:?},{id}: {err}"); + warn!( + "Error in cache backend writing {tpe:?},{id}: {}", + err.display_log() + ); } } self.be.write_bytes(tpe, id, cacheable, buf) @@ -200,7 +222,10 @@ impl WriteBackend for CachedBackend { fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()> { if cacheable || tpe.is_cacheable() { if let Err(err) = self.cache.remove(tpe, id) { - warn!("Error in cache backend removing {tpe:?},{id}: {err}"); + warn!( + "Error in cache backend removing {tpe:?},{id}: {}", + err.display_log() + ); } } self.be.remove(tpe, id, cacheable) @@ -491,6 +516,7 @@ impl Cache { "Failed to read at offset `{offset}` from file at `{path}`", err, ) + .attach_context("path", path.display().to_string()) .attach_context("tpe", tpe.to_string()) .attach_context("id", id.to_string()) .attach_context("offset", offset.to_string()) diff --git a/crates/core/src/backend/decrypt.rs b/crates/core/src/backend/decrypt.rs index 
414084f4..fa1cc7c6 100644 --- a/crates/core/src/backend/decrypt.rs +++ b/crates/core/src/backend/decrypt.rs @@ -216,7 +216,7 @@ pub trait DecryptWriteBackend: WriteBackend + Clone + 'static { /// /// # Returns /// - /// The processed data, the original data length and when compression is used, the uncomressed length + /// The processed data, the original data length and when compression is used, the uncompressed length fn process_data(&self, data: &[u8]) -> RusticResult<(Vec, u32, Option)>; /// Writes the given data to the backend without compression and returns the id of the data. @@ -559,7 +559,7 @@ impl DecryptWriteBackend for DecryptBackend { /// /// # Arguments /// - /// * `extra_echeck` - The compression level to use for zstd. + /// * `extra_verify` - Whether to perform extra verification of the data before it is written. fn set_extra_verify(&mut self, extra_verify: bool) { self.extra_verify = extra_verify; } @@ -622,6 +622,10 @@ impl ReadBackend for DecryptBackend { ) -> RusticResult { self.be.read_partial(tpe, id, cacheable, offset, length) } + + fn is_async_compatible(&self) -> bool { + self.be.is_async_compatible() + } } impl WriteBackend for DecryptBackend { diff --git a/crates/core/src/backend/dry_run.rs b/crates/core/src/backend/dry_run.rs index 5bd07cb8..300656fe 100644 --- a/crates/core/src/backend/dry_run.rs +++ b/crates/core/src/backend/dry_run.rs @@ -103,6 +103,10 @@ impl ReadBackend for DryRunBackend { ) -> RusticResult { self.be.read_partial(tpe, id, cacheable, offset, length) } + + fn is_async_compatible(&self) -> bool { + self.be.is_async_compatible() + } } impl DecryptWriteBackend for DryRunBackend { diff --git a/crates/core/src/backend/hotcold.rs b/crates/core/src/backend/hotcold.rs index e5306e2f..594e1851 100644 --- a/crates/core/src/backend/hotcold.rs +++ b/crates/core/src/backend/hotcold.rs @@ -75,6 +75,10 @@ impl ReadBackend for HotColdBackend { fn warm_up(&self, tpe: FileType, id: &Id) -> RusticResult<()> { self.be.warm_up(tpe, id) } + + fn is_async_compatible(&self)
-> bool { + self.be.is_async_compatible() + } } impl WriteBackend for HotColdBackend { diff --git a/crates/core/src/backend/ignore.rs b/crates/core/src/backend/ignore.rs index a3c5cf2d..f33b49ce 100644 --- a/crates/core/src/backend/ignore.rs +++ b/crates/core/src/backend/ignore.rs @@ -358,10 +358,10 @@ impl ReadSource for LocalSource { fn size(&self) -> RusticResult> { let mut size = 0; for entry in self.builder.build() { - if let Err(e) = entry.and_then(|e| e.metadata()).map(|m| { + if let Err(err) = entry.and_then(|e| e.metadata()).map(|m| { size += if m.is_dir() { 0 } else { m.len() }; }) { - warn!("ignoring error {}", e); + warn!("ignoring error {err}"); } } Ok(Some(size)) @@ -656,8 +656,8 @@ fn map_entry( let links = if m.is_dir() { 0 } else { m.nlink() }; let extended_attributes = match list_extended_attributes(entry.path()) { - Err(e) => { - warn!("ignoring error: {e}\n"); + Err(err) => { + warn!("ignoring error: {err}"); vec![] } Ok(xattr_list) => xattr_list, diff --git a/crates/core/src/backend/warm_up.rs b/crates/core/src/backend/warm_up.rs index 3e25d49a..8406176d 100644 --- a/crates/core/src/backend/warm_up.rs +++ b/crates/core/src/backend/warm_up.rs @@ -59,6 +59,10 @@ impl ReadBackend for WarmUpAccessBackend { _ = self.be.read_partial(tpe, id, false, 0, 1); Ok(()) } + + fn is_async_compatible(&self) -> bool { + self.be.is_async_compatible() + } } impl WriteBackend for WarmUpAccessBackend { diff --git a/crates/core/src/blob/tree.rs b/crates/core/src/blob/tree.rs index e28d079d..2e64a24e 100644 --- a/crates/core/src/blob/tree.rs +++ b/crates/core/src/blob/tree.rs @@ -8,7 +8,6 @@ use std::{ }; use crossbeam_channel::{bounded, unbounded, Receiver, Sender}; -use derivative::Derivative; use derive_setters::Setters; use ignore::overrides::{Override, OverrideBuilder}; use ignore::Match; @@ -455,8 +454,7 @@ impl IntoIterator for Tree { } #[cfg_attr(feature = "clap", derive(clap::Parser))] -#[derive(Derivative, Clone, Debug, Setters)] -#[derivative(Default)] 
+#[derive(Clone, Debug, Setters)] #[setters(into)] #[non_exhaustive] /// Options for listing the `Nodes` of a `Tree` @@ -488,10 +486,21 @@ pub struct TreeStreamerOptions { /// recursively list the dir #[cfg_attr(feature = "clap", clap(long))] - #[derivative(Default(value = "true"))] pub recursive: bool, } +impl Default for TreeStreamerOptions { + fn default() -> Self { + Self { + glob: Vec::default(), + iglob: Vec::default(), + glob_file: Vec::default(), + iglob_file: Vec::default(), + recursive: true, + } + } +} + /// [`NodeStreamer`] recursively streams all nodes of a given tree including all subtrees in-order #[derive(Debug, Clone)] pub struct NodeStreamer<'a, BE, I> diff --git a/crates/core/src/commands/backup.rs b/crates/core/src/commands/backup.rs index 0b7e6602..78327eba 100644 --- a/crates/core/src/commands/backup.rs +++ b/crates/core/src/commands/backup.rs @@ -267,7 +267,7 @@ pub(crate) fn backup( let (parent_id, parent) = opts.parent_opts.get_parent(repo, &snap, backup_stdin); match parent_id { Some(id) => { - info!("using parent {}", id); + info!("using parent {id}"); snap.parent = Some(id); } None => { @@ -276,7 +276,7 @@ pub(crate) fn backup( }; let be = DryRunBackend::new(repo.dbe().clone(), opts.dry_run); - info!("starting to backup {source}..."); + info!("starting to backup {source} ..."); let archiver = Archiver::new(be, index, repo.config(), parent, snap)?; let p = repo.pb.progress_bytes("backing up..."); diff --git a/crates/core/src/commands/check.rs b/crates/core/src/commands/check.rs index 7d0668ef..31a467a4 100644 --- a/crates/core/src/commands/check.rs +++ b/crates/core/src/commands/check.rs @@ -274,7 +274,10 @@ pub(crate) fn check_repository( .map(|(id, size)| (**id, *size)) .collect(); if let Err(err) = cache.remove_not_in_list(FileType::Pack, &ids) { - warn!("Error in cache backend removing pack files: {err}"); + warn!( + "Error in cache backend removing pack files: {}", + err.display_log() + ); } p.finish(); @@ -309,21 +312,13 @@ 
pub(crate) fn check_repository( let data = match be.read_full(FileType::Pack, &id) { Ok(data) => data, Err(err) => { - // FIXME: This needs different handling, now it prints a full display of RusticError - // Instead we should actually collect and return a list of errors on the happy path - // for `Check`, as this is a non-critical operation and we want to show all errors - // to the user. - error!("Error reading data for pack {id} : {err}"); + error!("Error reading data for pack {id} : {}", err.display_log()); return; } }; match check_pack(be, pack, data, &p) { Ok(()) => {} - // FIXME: This needs different handling, now it prints a full display of RusticError - // Instead we should actually collect and return a list of errors on the happy path - // for `Check`, as this is a non-critical operation and we want to show all errors - // to the user. - Err(err) => error!("Pack {id} is not valid: {err}",), + Err(err) => error!("Pack {id} is not valid: {}", err.display_log()), } }); p.finish(); @@ -362,7 +357,6 @@ fn check_hot_files( match files.remove(&id) { None => error!("hot file Type: {file_type:?}, Id: {id} does not exist in repo"), Some(size) if size != size_hot => { - // TODO: This should be an actual error not a log entry error!("Type: {file_type:?}, Id: {id}: hot size: {size_hot}, actual size: {size}"); } _ => {} //everything ok @@ -415,10 +409,16 @@ fn check_cache_files( be.read_full(file_type, &id), ) { (Err(err), _) => { - error!("Error reading cached file Type: {file_type:?}, Id: {id} : {err}"); + error!( + "Error reading cached file Type: {file_type:?}, Id: {id} : {}", + err.display_log() + ); } (_, Err(err)) => { - error!("Error reading file Type: {file_type:?}, Id: {id} : {err}"); + error!( + "Error reading file Type: {file_type:?}, Id: {id} : {}", + err.display_log() + ); } (Ok(Some(data_cached)), Ok(data)) if data_cached != data => { error!( diff --git a/crates/core/src/commands/repair/index.rs b/crates/core/src/commands/repair/index.rs index 
4ff7dc2f..ba0ecfa9 100644 --- a/crates/core/src/commands/repair/index.rs +++ b/crates/core/src/commands/repair/index.rs @@ -90,7 +90,10 @@ pub(crate) fn repair_index( debug!("reading pack {id}..."); match PackHeader::from_file(be, id, size_hint, packsize) { Err(err) => { - warn!("error reading pack {id} (-> removing from index): {err}"); + warn!( + "error reading pack {id} (-> removing from index): {}", + err.display_log() + ); } Ok(header) => { let pack = IndexPack { diff --git a/crates/core/src/commands/repair/snapshots.rs b/crates/core/src/commands/repair/snapshots.rs index ebc43c87..1672e09b 100644 --- a/crates/core/src/commands/repair/snapshots.rs +++ b/crates/core/src/commands/repair/snapshots.rs @@ -213,8 +213,8 @@ pub(crate) fn repair_tree( } let (tree, mut changed) = Tree::from_backend(be, index, id).map_or_else( - |_err| { - warn!("tree {id} could not be loaded."); + |err| { + warn!("tree {id} could not be loaded: {}", err.display_log()); (Tree::new(), Changed::This) }, |tree| (tree, Changed::None), diff --git a/crates/core/src/error.rs b/crates/core/src/error.rs index 5d98035f..9d6b3933 100644 --- a/crates/core/src/error.rs +++ b/crates/core/src/error.rs @@ -19,7 +19,7 @@ //! `Result (==RusticResult)` or nested results like `RusticResult>`. //! So even if the visibility of that function is `fn` or `pub(crate) fn` it should return a `RusticResult` containing a `RusticError`. //! -//! If we `map_err` or `and_then` a `RusticError`, we don't want to create a new RusticError from it, but just attach some context +//! If we `map_err` or `and_then` a `RusticError`, we don't want to create a new `RusticError` from it, but just attach some context //! to it, e.g. `map_err(|e| e.attach_context("key", "value"))`, so we don't lose the original error. We can also change the error //! kind with `map_err(|e| e.overwrite_kind(ErrorKind::NewKind))`. If we want to pre- or append to the guidance, we can use //! 
`map_err(|e| e.append_guidance_line("new line"))` or `map_err(|e| e.prepend_guidance_line("new line"))`. @@ -48,11 +48,6 @@ //! All types that we want to attach to an error should implement `Display` and `Debug` to provide a good error message and a nice way //! to display the error. -// FIXME: Remove when 'displaydoc' has fixed/recommended further treatment upstream: https://github.com/yaahc/displaydoc/issues/48 -#![allow(clippy::doc_markdown)] -// use std::error::Error as StdError; -// use std::fmt; - use derive_more::derive::Display; use ecow::{EcoString, EcoVec}; use std::{ @@ -293,7 +288,7 @@ impl Display for RusticError { writeln!(f, "Caused by:")?; writeln!(f, "{cause}")?; if let Some(source) = cause.source() { - write!(f, " : (source: {source:?})")?; + write!(f, " : (source: {source})")?; } writeln!(f)?; } @@ -375,6 +370,53 @@ impl RusticError { ) -> Box { Self::with_source(kind, error.to_string(), error) } + + /// Returns a String representation for logging purposes. + /// + /// This is a more concise version of the error message. + pub fn display_log(&self) -> String { + use std::fmt::Write; + + let mut fmt = String::new(); + + _ = write!(fmt, "Error: "); + + if self.context.is_empty() { + _ = write!(fmt, "{}", self.guidance); + } else { + // If there is context, we want to iterate over it + // use the key to replace the placeholder in the guidance. 
+ let mut guidance = self.guidance.to_string(); + + self.context + .iter() + // remove context which has been used in the guidance + .for_each(|(key, value)| { + let pattern = "{".to_owned() + key + "}"; + guidance = guidance.replace(&pattern, value); + }); + + _ = write!(fmt, "{guidance}"); + }; + + _ = write!(fmt, " (kind: related to {}", self.kind); + + if let Some(code) = &self.error_code { + _ = write!(fmt, ", code: {code}"); + } + + _ = write!(fmt, ")"); + + if let Some(cause) = &self.source { + _ = write!(fmt, ": caused by: {cause}"); + + if let Some(source) = cause.source() { + _ = write!(fmt, " : (source: {source})"); + } + } + + fmt + } } // Setters for anything we do want to expose publicly. diff --git a/crates/core/src/repofile/snapshotfile.rs b/crates/core/src/repofile/snapshotfile.rs index ec7eecc6..90f17903 100644 --- a/crates/core/src/repofile/snapshotfile.rs +++ b/crates/core/src/repofile/snapshotfile.rs @@ -9,7 +9,6 @@ use std::{ use chrono::{DateTime, Duration, Local, OutOfRangeError}; #[cfg(feature = "clap")] use clap::ValueHint; -use derivative::Derivative; use derive_setters::Setters; use dunce::canonicalize; use gethostname::gethostname; @@ -162,9 +161,8 @@ impl SnapshotOptions { /// /// This is an extended version of the summaryOutput structure of restic in /// restic/internal/ui/backup$/json.go -#[derive(Serialize, Deserialize, Debug, Clone, Derivative)] +#[derive(Serialize, Deserialize, Debug, Clone)] #[serde(default)] -#[derivative(Default)] #[non_exhaustive] pub struct SnapshotSummary { /// New files compared to the last (i.e. parent) snapshot @@ -229,11 +227,9 @@ pub struct SnapshotSummary { /// # Note /// /// This may differ from the snapshot `time`. - #[derivative(Default(value = "Local::now()"))] pub backup_start: DateTime, /// The time that the backup has been finished. - #[derivative(Default(value = "Local::now()"))] pub backup_end: DateTime, /// Total duration of the backup in seconds, i.e. 
the time between `backup_start` and `backup_end` @@ -243,6 +239,36 @@ pub struct SnapshotSummary { pub total_duration: f64, } +impl Default for SnapshotSummary { + fn default() -> Self { + Self { + files_new: Default::default(), + files_changed: Default::default(), + files_unmodified: Default::default(), + total_files_processed: Default::default(), + total_bytes_processed: Default::default(), + dirs_new: Default::default(), + dirs_changed: Default::default(), + dirs_unmodified: Default::default(), + total_dirs_processed: Default::default(), + total_dirsize_processed: Default::default(), + data_blobs: Default::default(), + tree_blobs: Default::default(), + data_added: Default::default(), + data_added_packed: Default::default(), + data_added_files: Default::default(), + data_added_files_packed: Default::default(), + data_added_trees: Default::default(), + data_added_trees_packed: Default::default(), + command: String::default(), + backup_start: Local::now(), + backup_end: Local::now(), + backup_duration: Default::default(), + total_duration: Default::default(), + } + } +} + impl SnapshotSummary { /// Create a new [`SnapshotSummary`]. /// @@ -269,11 +295,10 @@ impl SnapshotSummary { } /// Options for deleting snapshots. -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Derivative, Copy)] -#[derivative(Default)] +#[derive(Serialize, Default, Deserialize, Debug, Clone, PartialEq, Eq, Copy)] pub enum DeleteOption { /// No delete option set. - #[derivative(Default)] + #[default] NotSet, /// This snapshot should be never deleted (remove-protection). Never, @@ -291,8 +316,7 @@ impl DeleteOption { impl_repofile!(SnapshotId, FileType::Snapshot, SnapshotFile); #[skip_serializing_none] -#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] -#[derivative(Default)] +#[derive(Debug, Clone, Serialize, Deserialize)] /// A [`SnapshotFile`] is the repository representation of the snapshot metadata saved in a repository. 
/// /// It is usually saved in the repository under `snapshot/` @@ -302,14 +326,10 @@ impl_repofile!(SnapshotId, FileType::Snapshot, SnapshotFile); /// [`SnapshotFile`] implements [`Eq`], [`PartialEq`], [`Ord`], [`PartialOrd`] by comparing only the `time` field. /// If you need another ordering, you have to implement that yourself. pub struct SnapshotFile { - #[derivative(Default(value = "Local::now()"))] /// Timestamp of this snapshot pub time: DateTime, /// Program identifier and its version that have been used to create this snapshot. - #[derivative(Default( - value = "\"rustic \".to_string() + option_env!(\"PROJECT_VERSION\").unwrap_or(env!(\"CARGO_PKG_VERSION\"))" - ))] #[serde(default, skip_serializing_if = "String::is_empty")] pub program_version: String, @@ -364,6 +384,33 @@ pub struct SnapshotFile { pub id: SnapshotId, } +impl Default for SnapshotFile { + fn default() -> Self { + Self { + time: Local::now(), + program_version: { + let project_version = + option_env!("PROJECT_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); + format!("rustic {project_version}") + }, + parent: Option::default(), + tree: TreeId::default(), + label: String::default(), + paths: StringList::default(), + hostname: String::default(), + username: String::default(), + uid: Default::default(), + gid: Default::default(), + tags: StringList::default(), + original: Option::default(), + delete: DeleteOption::default(), + summary: Option::default(), + description: Option::default(), + id: SnapshotId::default(), + } + } +} + impl SnapshotFile { /// Create a [`SnapshotFile`] from [`SnapshotOptions`]. /// @@ -560,7 +607,7 @@ impl SnapshotFile { /// * If no id could be found. /// * If the id is not unique. 
pub(crate) fn from_id(be: &B, id: &str) -> RusticResult { - info!("getting snapshot..."); + info!("getting snapshot ..."); let id = be.find_id(FileType::Snapshot, id)?; Self::from_backend(be, &SnapshotId::from(id)) } diff --git a/crates/core/src/repository.rs b/crates/core/src/repository.rs index 2d7726b4..250b2899 100644 --- a/crates/core/src/repository.rs +++ b/crates/core/src/repository.rs @@ -755,6 +755,19 @@ impl Repository { pub fn list(&self) -> RusticResult> { Ok(self.be.list(T::TYPE)?.into_iter().map(Into::into)) } + + /// Check if one of this repository backend is incompatible + /// with async features in `rustic_core` implementations. + /// + /// see + pub fn is_async_compatible(&self) -> bool { + // check if be or be_hot is incompatible with async + self.be.is_async_compatible() + && self + .be_hot + .as_ref() + .map_or(true, ReadBackend::is_async_compatible) + } } impl Repository { @@ -2003,7 +2016,7 @@ impl Repository { impl Repository { /// Run a backup of `source` using the given options. /// - /// You have to give a preflled [`SnapshotFile`] which is modified and saved. + /// You have to give a prefilled [`SnapshotFile`] which is modified and saved. /// /// # Arguments /// diff --git a/crates/core/src/repository/warm_up.rs b/crates/core/src/repository/warm_up.rs index 22131a92..a1a74a6b 100644 --- a/crates/core/src/repository/warm_up.rs +++ b/crates/core/src/repository/warm_up.rs @@ -144,9 +144,9 @@ fn warm_up_repo( pool.in_place_scope(|scope| { for pack in packs { scope.spawn(move |_| { - if let Err(e) = backend.warm_up(FileType::Pack, &pack) { + if let Err(err) = backend.warm_up(FileType::Pack, &pack) { // FIXME: Use error handling - error!("warm-up failed for pack {pack:?}. {e}"); + error!("warm-up failed for pack {pack:?}. 
{}", err.display_log()); }; progress_bar_ref.inc(1); }); diff --git a/crates/core/src/vfs.rs b/crates/core/src/vfs.rs index 5904ea4d..00d35672 100644 --- a/crates/core/src/vfs.rs +++ b/crates/core/src/vfs.rs @@ -173,8 +173,10 @@ impl VfsTree { } } -#[derive(Debug, Clone, Copy, EnumString)] +#[cfg_attr(feature = "clap", derive(clap::ValueEnum))] +#[derive(Debug, Clone, Copy, EnumString, serde::Deserialize, serde::Serialize)] #[strum(ascii_case_insensitive)] +#[serde(rename_all = "lowercase")] #[non_exhaustive] /// Policy to describe how to handle access to a file pub enum FilePolicy { diff --git a/crates/core/tests/errors.rs b/crates/core/tests/errors.rs index 2171dd81..29842b99 100644 --- a/crates/core/tests/errors.rs +++ b/crates/core/tests/errors.rs @@ -20,12 +20,35 @@ fn error() -> Box { .ask_report() } +#[fixture] +fn minimal_error() -> Box { + RusticError::new( + ErrorKind::InputOutput, + "A file could not be read, make sure the file at `{path}` is existing and readable by the system.", + ) + .attach_context("path", "/path/to/file") +} + #[rstest] +#[allow(clippy::boxed_local)] fn test_error_display(error: Box) { insta::assert_snapshot!(error); } #[rstest] +#[allow(clippy::boxed_local)] fn test_error_debug(error: Box) { insta::assert_debug_snapshot!(error); } + +#[rstest] +#[allow(clippy::boxed_local)] +fn test_log_output_passes(error: Box) { + insta::assert_snapshot!(error.display_log()); +} + +#[rstest] +#[allow(clippy::boxed_local)] +fn test_log_output_minimal_passes(minimal_error: Box) { + insta::assert_snapshot!(minimal_error.display_log()); +} diff --git a/crates/core/tests/snapshots/errors__log_output_minimal_passes.snap b/crates/core/tests/snapshots/errors__log_output_minimal_passes.snap new file mode 100644 index 00000000..ca438435 --- /dev/null +++ b/crates/core/tests/snapshots/errors__log_output_minimal_passes.snap @@ -0,0 +1,5 @@ +--- +source: crates/core/tests/errors.rs +expression: minimal_error.to_log_output() +--- +Error: A file could not be 
read, make sure the file at `/path/to/file` is existing and readable by the system. (kind: related to input/output operations) diff --git a/crates/core/tests/snapshots/errors__log_output_passes.snap b/crates/core/tests/snapshots/errors__log_output_passes.snap new file mode 100644 index 00000000..2c9db8ed --- /dev/null +++ b/crates/core/tests/snapshots/errors__log_output_passes.snap @@ -0,0 +1,7 @@ +--- +source: crates/core/tests/errors.rs +expression: error.to_log_output() +--- +Error: Prepended guidance line +A file could not be read, make sure the file at `/path/to/file` is existing and readable by the system. +Appended guidance line (kind: related to input/output operations, code: C001): caused by: Networking Error diff --git a/crates/testing/CHANGELOG.md b/crates/testing/CHANGELOG.md index 932608ad..9ef27986 100644 --- a/crates/testing/CHANGELOG.md +++ b/crates/testing/CHANGELOG.md @@ -6,6 +6,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.0](https://github.com/rustic-rs/rustic_core/compare/rustic_testing-v0.2.3...rustic_testing-v0.3.0) - 2024-11-18 + +### Added + +- *(async)* add `async_compatible` methods to identify backend compatibility ([#355](https://github.com/rustic-rs/rustic_core/pull/355)) + +### Other + +- *(errors)* [**breaking**] Improve error handling, display and clean up codebase ([#321](https://github.com/rustic-rs/rustic_core/pull/321)) + ## [0.2.3](https://github.com/rustic-rs/rustic_core/compare/rustic_testing-v0.2.2...rustic_testing-v0.2.3) - 2024-10-25 ### Other diff --git a/crates/testing/Cargo.toml b/crates/testing/Cargo.toml index 9dc76422..ef521319 100644 --- a/crates/testing/Cargo.toml +++ b/crates/testing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rustic_testing" -version = "0.2.3" +version = "0.3.0" edition = "2021" license = "Apache-2.0 OR MIT" publish = true diff --git a/crates/testing/src/backend.rs b/crates/testing/src/backend.rs index 418bd320..64e9b070 100644 
--- a/crates/testing/src/backend.rs +++ b/crates/testing/src/backend.rs @@ -58,6 +58,11 @@ pub mod in_memory_backend { ) -> RusticResult { Ok(self.0.read().unwrap()[tpe][id].slice(offset as usize..(offset + length) as usize)) } + + /// [`InMemoryBackend`] doesn't use `async`, even under the hood. + fn is_async_compatible(&self) -> bool { + true + } } impl WriteBackend for InMemoryBackend { diff --git a/deny.toml b/deny.toml index 6c630191..d2c10e72 100644 --- a/deny.toml +++ b/deny.toml @@ -74,10 +74,6 @@ ignore = [ # FIXME!: See https://github.com/RustCrypto/RSA/issues/19#issuecomment-1822995643. # There is no workaround available yet. "RUSTSEC-2023-0071", - # FIXME: backoff => used in backend, need to be replaced with backon - "RUSTSEC-2024-0384", - # FIXME: derivative => used for default impls - "RUSTSEC-2024-0388", # { id = "RUSTSEC-0000-0000", reason = "you can specify a reason the advisory is ignored" }, # "a-crate-that-is-yanked@0.1.1", # you can also ignore yanked crate versions if you wish # { crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" },