Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Re-introduce property 9 #1093

Merged
merged 4 commits into from
Nov 6, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,4 @@
cc 14b817f8f064c889dabecae41294027ede294a5e51f613ddba46d8bb352f47f5 # shrinks to seed = 18077722532471589563, state = State { nodes: {2: Directory { name: "BUndwQB7", dir: Directory { children: {3}, parent: Some(1) } }, 1: Directory { name: "/", dir: Directory { children: {2}, parent: None } }, 3: File { file: File { parent: 2, bytes_written: 0, bytes_read: 0, access_tick: 0, modified_tick: 0, status_tick: 0, bytes_per_tick: 72, read_only: false, peer: None, ordinal: 0, group_id: 0, open_handles: 0, unlinked: false } }}, root_inode: 1, now: 0, max_rotations: 8, max_bytes_per_file: 32273, group_names: [["77sYYEM2_0.log", "77sYYEM2_0.log.1", "77sYYEM2_0.log.2", "77sYYEM2_0.log.3", "77sYYEM2_0.log.4", "77sYYEM2_0.log.5", "77sYYEM2_0.log.6", "77sYYEM2_0.log.7", "77sYYEM2_0.log.8"]], next_inode: 4, .. }, operations = [GetAttr, Read { offset: 873, size: 578 }, GetAttr, Wait { ticks: 67 }, Close, Open, Close, GetAttr, GetAttr, Open, GetAttr, Wait { ticks: 76 }, Wait { ticks: 90 }, Open, Wait { ticks: 73 }, Wait { ticks: 94 }, Close, Lookup { name: Some("¥'?*:𐠸𝜒A£⳾6?𐢬_Ѩ%ౝ0?{`.𑌂dM") }, Wait { ticks: 63 }, Open, Open, Open, Lookup { name: Some(";0$Ѩ¥𐖘Ⱥ.${*_:gn`Ⱥ\u{113d8}*Z") }, Open, Read { offset: 596, size: 401 }, Read { offset: 314, size: 229 }, Read { offset: 876, size: 934 }, Read { offset: 899, size: 782 }, Read { offset: 871, size: 732 }, Wait { ticks: 3 }, Read { offset: 986, size: 52 }, Lookup { name: None }, Lookup { name: None }, Read { offset: 586, size: 180 }, Close, Open, Lookup { name: Some("𞹾&Ⱥ<") }, GetAttr, Read { offset: 499, size: 626 }, Lookup { name: Some("𑥗%@�^ೋ𝄓") }, Read { offset: 625, size: 519 }, Open, Read { offset: 26, size: 857 }, GetAttr, Read { offset: 530, size: 378 }, Read { offset: 95, size: 717 }, GetAttr, Close, Read { offset: 119, size: 956 }, Open, GetAttr, Read { offset: 760, size: 956 }, Close, Wait { ticks: 98 }, Wait { ticks: 12 }, Read { offset: 138, size: 227 }, Wait { ticks: 41 }, GetAttr]
cc 84a14bb361e5846589558e1fc52c5dee33d22e789034ef13c61f30ca4856d5da # shrinks to seed = 1512443422463708349, state = State { nodes: {1: Directory { name: "/", dir: Directory { children: {2}, parent: None } }, 2: Directory { name: "eKZTyj4p", dir: Directory { children: {3}, parent: Some(1) } }, 3: File { file: File { parent: 2, bytes_written: 0, bytes_read: 0, access_tick: 0, modified_tick: 0, status_tick: 0, bytes_per_tick: 4, read_only: false, peer: None, ordinal: 0, group_id: 0, open_handles: 0, unlinked: false } }}, root_inode: 1, now: 0, max_rotations: 2, max_bytes_per_file: 196227, group_names: [["F5Anm0dg_0.log", "F5Anm0dg_0.log.1", "F5Anm0dg_0.log.2"]], next_inode: 4, .. }, operations = [Wait { ticks: 40 }, Lookup { name: Some("𑌷C&𞺡\"?\"$<&%{$౿ோ") }, GetAttr, GetAttr, GetAttr, GetAttr, GetAttr, GetAttr, Wait { ticks: 17 }, Close, Read { offset: 225, size: 373 }, Wait { ticks: 34 }, Lookup { name: Some("ኻࠕN?¥<q*=%É\u{1d244}₹") }, Lookup { name: None }, Read { offset: 161, size: 111 }, Open, Lookup { name: None }, Wait { ticks: 24 }, Lookup { name: None }, Open, Lookup { name: Some("!*Ѩ𑼍ᛵﮩꬌῼ𞋿יּ:\"") }, Read { offset: 692, size: 769 }, Lookup { name: None }, Lookup { name: Some("𞅅.Ѩf$🉑{lⴧ\\\\?𞸻;સ./⋢ꚱÆᏽ`𐰃𖽻𐖇p𞋿h𝓂m{Z") }, Wait { ticks: 85 }, Open, Read { offset: 220, size: 438 }, Read { offset: 991, size: 393 }, Read { offset: 793, size: 379 }, Lookup { name: None }, GetAttr, Read { offset: 108, size: 606 }, Lookup { name: None }, Lookup { name: Some("𐖠\"n.[") }, Read { offset: 207, size: 867 }, Read { offset: 995, size: 862 }, GetAttr, Read { offset: 633, size: 542 }, Close, Close, Lookup { name: Some("Vպ۞.\"\"𐤈\\磌῞k`'𛲔\"�*|\\'{\u{b82}₻FᱤZP🕴.\\Ⱥ") }, GetAttr, GetAttr, Read { offset: 138, size: 921 }, Wait { ticks: 45 }]
cc a0f297919ada1079a80f8e7b2207fb5068dbe284c131b97ef3be4b371f2cd633 # shrinks to seed = 0, state = State { nodes: {1: Directory { name: "/", dir: Directory { children: {2}, parent: None } }, 2: Directory { name: "lfURbM5a", dir: Directory { children: {3}, parent: Some(1) } }, 3: File { file: File { parent: 2, bytes_written: 0, bytes_read: 0, created_tick: 1, access_tick: 1, modified_tick: 1, status_tick: 1, bytes_per_tick: 0, read_only: false, read_only_since: None, peer: None, ordinal: 0, group_id: 0, open_handles: 0, unlinked: false, max_offset_observed: 0 } }}, root_inode: 1, now: 1, max_rotations: 1, max_bytes_per_file: 1024, group_names: [["olvicJh4_0.log", "olvicJh4_0.log.1"]], next_inode: 4, .. }, operations = [Wait { ticks: 19 }, Wait { ticks: 60 }, Wait { ticks: 0 }, Wait { ticks: 0 }]
cc a74443385123c18f03fab3511fa56bb75f87408e84d48367f9159eac1cd84374 # shrinks to seed = 0, state = State { nodes: {1: Directory { name: "/", dir: Directory { children: {2}, parent: None } }, 2: Directory { name: "lfURbM5a", dir: Directory { children: {3}, parent: Some(1) } }, 3: File { file: File { parent: 2, bytes_written: 0, bytes_read: 0, created_tick: 1, access_tick: 1, modified_tick: 1, status_tick: 1, bytes_per_tick: 0, read_only: false, read_only_since: None, peer: None, ordinal: 0, group_id: 0, open_handles: 0, unlinked: false, max_offset_observed: 0 } }}, root_inode: 1, now: 1, max_rotations: 1, max_bytes_per_file: 1024, group_names: [["olvicJh4_0.log", "olvicJh4_0.log.1"]], next_inode: 4, .. }, operations = [Wait { ticks: 2 }]
80 changes: 29 additions & 51 deletions lading/src/generator/file_gen/logrotate_fs/model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -228,14 +228,6 @@ impl File {
/// Increments this file's rotation ordinal by one, saturating at the
/// integer type's maximum instead of wrapping on overflow.
pub(crate) fn incr_ordinal(&mut self) {
self.ordinal = self.ordinal.saturating_add(1);
}

/// Returns the current size in bytes of the File, defined as the total
/// `bytes_written` so far.
///
/// This function does not advance time.
#[must_use]
pub(crate) fn size(&self) -> u64 {
self.bytes_written
}
}

/// Model representation of a `Directory`. Contains children are `Directory`
Expand Down Expand Up @@ -319,6 +311,7 @@ impl std::fmt::Debug for State {
.field("max_bytes_per_file", &self.max_bytes_per_file)
.field("group_names", &self.group_names)
.field("next_inode", &self.next_inode)
.field("load_profile", &self.load_profile)
.finish_non_exhaustive()
}
}
Expand Down Expand Up @@ -592,19 +585,19 @@ impl State {
Node::Directory { .. } => continue,
};

// No matter what we advance time for the file.
file.advance_time(now);

// If the file is read-only we have no more work to do on this file
// although it _may_ be touched if we process a peer chain below.
if file.read_only() {
continue;
}

// If the file is available for writing we advance time for the file.
file.advance_time(now);

// Determine if the file pointed to by inode needs to be rotated. A
// file is only rotated if it is linked, that is, it has a name in
// the filesystem.
if file.size() < self.max_bytes_per_file {
if file.bytes_written < self.max_bytes_per_file {
continue;
}
assert!(
Expand Down Expand Up @@ -992,7 +985,7 @@ mod test {
(
any::<u64>(), // seed
1u8..=8u8, // max_rotations
1024u64..=500_000u64, // max_bytes_per_file
1u64..=100_000u64, // max_bytes_per_file
1u8..=4u8, // max_depth
1u16..=16u16, // concurrent_logs
1u64..=1000u64, // initial_tick
Expand Down Expand Up @@ -1216,44 +1209,29 @@ mod test {
}
}

// // Property 8: max(bytes_written) <= max_bytes_per_file + bytes_per_second
// //
// // If just prior to a rollover the file is within bytes_per_second of
// // max_bytes_per_file on the next tick that the rollover happens the
// // file will be larger than max_bytes_per_file but to a limited degree.
// for node in state.nodes.values() {
// if let Node::File { file } = node {
// if file.unlinked {
// continue;
// }
// let max_size = state.max_bytes_per_file + file.bytes_per_tick;
// assert!(
// file.size() <= max_size,
// "File size {sz} exceeds max allowed size {max_size}",
// sz = file.size()
// );
// }
// }

// // Property 9: Rotated files have bytes_written within acceptable range
// //
// // For a rotated file (read_only == true), bytes_written should be
// // within (max_bytes_per_file - bytes_per_tick) <= bytes_written <
// // (max_bytes_per_file + bytes_per_tick).
// for node in state.nodes.values() {
// if let Node::File { file } = node {
// if !file.read_only {
// continue;
// }
// let min_size = state.max_bytes_per_file.saturating_sub(file.bytes_per_tick);
// let max_size = state.max_bytes_per_file.saturating_add(file.bytes_per_tick);
// assert!(
// file.bytes_written >= min_size && file.bytes_written < max_size,
// "Rotated file size {bytes_written} not in expected range [{min_size}, {max_size})",
// bytes_written = file.bytes_written
// );
// }
// }
// Property 8: Rotated files have bytes_written within acceptable range
//
// For a rotated file (read_only == true), bytes_written should be
// within max_bytes_per_file <= bytes_written <= (max_bytes_per_file +
// 2 * bytes_per_tick). It's possible because of when rotation is done
// that a full tick will elapse, allowing an additional tick worth of
// bytes to be written, hence the 2x.
for node in state.nodes.values() {
if let Node::File { file } = node {
if !file.read_only {
continue;
}
let min_size = state.max_bytes_per_file;
let max_size = state
.max_bytes_per_file
.saturating_add(2 * file.bytes_per_tick);
assert!(
file.bytes_written >= min_size && file.bytes_written <= max_size,
"Rotated file size {actual} not in expected range [{min_size}, {max_size}]",
actual = file.bytes_written
);
}
}
}

fn compute_expected_bytes_written(
Expand Down
Loading