Skip to content

Commit

Permalink
[feat] add unit test for axsync mechanisms
Browse files Browse the repository at this point in the history
  • Loading branch information
hky1999 committed Oct 28, 2024
1 parent e8d3ea1 commit 4fc3b8b
Show file tree
Hide file tree
Showing 14 changed files with 856 additions and 202 deletions.
13 changes: 3 additions & 10 deletions modules/axsync/src/barrier.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,9 @@
//!
//! Note: [`Barrier`] is not available when the `multitask` feature is disabled.
#[cfg(test)]
mod tests;

use core::fmt;

use crate::condvar::Condvar;
Expand Down Expand Up @@ -99,16 +102,6 @@ impl BarrierWaitResult {
/// threads will have `false` returned.
///
/// [`wait`]: struct.Barrier.html#method.wait
///
/// # Examples
///
/// ```
/// use spin;
///
/// let barrier = spin::Barrier::new(1);
/// let barrier_wait_result = barrier.wait();
/// println!("{:?}", barrier_wait_result.is_leader());
/// ```
pub fn is_leader(&self) -> bool {
self.0
}
Expand Down
102 changes: 102 additions & 0 deletions modules/axsync/src/barrier/tests.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
use axtask as thread;

use crate::Barrier;

const NUM_TASKS: u32 = 10;
const NUM_ITERS: u32 = 10_000;

#[test]
fn test_barrier() {
    // Serialize tests in this crate and initialize the global scheduler
    // exactly once across the whole test binary.
    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    static BARRIER: Barrier = Barrier::new(NUM_TASKS as usize);

    // Each task rendezvouses at the barrier NUM_ITERS times; the test
    // passes iff the barrier releases all waiters every round (no deadlock).
    fn rendezvous() {
        for _ in 0..NUM_ITERS {
            BARRIER.wait();
        }
    }

    // The number of handles is known up front, so preallocate.
    let mut join_handlers = Vec::with_capacity(NUM_TASKS as usize);

    for _ in 0..NUM_TASKS {
        join_handlers.push(thread::spawn(rendezvous));
    }

    // Wait for all tasks to finish.
    for join_handler in join_handlers {
        join_handler.join();
    }

    println!("Barrier test OK");
}

#[test]
fn test_wait_result() {
    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    static BARRIER: Barrier = Barrier::new(1);

    // With a count of 1, every `wait` returns immediately and the caller
    // is always the leader. (`assert!` over `assert_eq!(_, true)`:
    // clippy::bool_assert_comparison.)
    assert!(BARRIER.wait().is_leader());

    // Since the barrier is reusable, the next call to `wait` is the
    // leader again.
    assert!(BARRIER.wait().is_leader());

    static BARRIER2: Barrier = Barrier::new(2);

    thread::spawn(|| {
        assert!(BARRIER2.wait().is_leader());
    });

    // The first task to call `wait` won't be the leader.
    assert!(!BARRIER2.wait().is_leader());

    thread::yield_now();

    println!("BarrierWaitResult test OK");
}

#[test]
fn test_barrier_wait_result() {
    use std::sync::mpsc::{channel, TryRecvError};
    use std::sync::Arc;

    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    let barrier = Arc::new(Barrier::new(NUM_TASKS as _));
    let (tx, rx) = channel();

    // We spawn exactly NUM_TASKS - 1 helpers, so preallocate.
    let mut join_handlers = Vec::with_capacity(NUM_TASKS as usize - 1);

    for _ in 0..NUM_TASKS - 1 {
        let c = barrier.clone();
        let tx = tx.clone();
        join_handlers.push(thread::spawn(move || {
            tx.send(c.wait().is_leader()).unwrap();
        }));
    }

    // At this point, all spawned tasks should be blocked on the barrier,
    // so we shouldn't get anything from the channel yet.
    assert!(matches!(rx.try_recv(), Err(TryRecvError::Empty)));

    let mut leader_found = barrier.wait().is_leader();

    // Wait for all tasks to finish.
    for join_handler in join_handlers {
        join_handler.join();
    }

    // Now the barrier is cleared: exactly one of the NUM_TASKS waiters
    // (this task included) must have observed `is_leader() == true`.
    for _ in 0..NUM_TASKS - 1 {
        if rx.recv().unwrap() {
            assert!(!leader_found);
            leader_found = true;
        }
    }
    assert!(leader_found);
}
5 changes: 3 additions & 2 deletions modules/axsync/src/condvar/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,6 @@ mod multitask;
#[cfg(feature = "multitask")]
pub use multitask::Condvar;



/// A type indicating whether a timed wait on a condition variable returned
/// due to a time out or not.
///
Expand All @@ -35,3 +33,6 @@ impl WaitTimeoutResult {
self.0
}
}

#[cfg(test)]
mod tests;
2 changes: 1 addition & 1 deletion modules/axsync/src/condvar/no_thread.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@

//! Dummy implementation of `Condvar` for single-threaded environments.
use core::time::Duration;

use crate::Mutex;
Expand Down
175 changes: 175 additions & 0 deletions modules/axsync/src/condvar/tests.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,175 @@
use std::sync::{mpsc::channel, Arc};

use axtask as thread;

use crate::{Condvar, Mutex};

const INIT_VALUE: u32 = 0;
const NUM_TASKS: u32 = 10;
const NUM_ITERS: u32 = 10_000;

/// Randomly yields the current task to simulate an interrupt firing at
/// this point, perturbing the interleaving between tasks.
fn may_interrupt() {
    // Yield with probability ~1/3.
    let roll = rand::random::<u32>() % 3;
    if roll == 0 {
        thread::yield_now();
    }
}

/// Adds `delta` to the shared counter `NUM_ITERS` times, notifying the
/// condvar after every increment. Random yields are sprinkled in to
/// exercise different task interleavings.
fn inc(delta: u32, pair: Arc<(Mutex<u32>, Condvar)>) {
    // Destructuring the pair is loop-invariant: hoist it out of the loop.
    let (lock, cvar) = &*pair;
    for _ in 0..NUM_ITERS {
        let mut val = lock.lock();
        *val += delta;
        may_interrupt();
        // Release the lock before notifying so a woken waiter can acquire it.
        drop(val);
        may_interrupt();
        // We notify the condvar that the value has changed.
        cvar.notify_one();
    }
}

#[test]
fn test_wait() {
    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    let pair = Arc::new((Mutex::new(INIT_VALUE), Condvar::new()));
    // Spawn NUM_TASKS incrementers of 1 and NUM_TASKS incrementers of 2:
    // the counter's final value is NUM_ITERS * NUM_TASKS * 3.
    for _ in 0..NUM_TASKS {
        let pair1 = Arc::clone(&pair);
        thread::spawn(move || inc(1, pair1));
        let pair2 = Arc::clone(&pair);
        thread::spawn(move || inc(2, pair2));
    }

    let (lock, cvar) = &*pair;
    let mut val = lock.lock();
    // As long as the counter inside the `Mutex<u32>` has not reached its
    // final value, we wait. (The old comment mentioned `Mutex<usize>`
    // and a value `i`, neither of which exists here.)
    while *val != NUM_ITERS * NUM_TASKS * 3 {
        may_interrupt();
        val = cvar.wait(val);
        may_interrupt();
    }
    drop(val);

    // `assert_eq!` prints both sides on failure, unlike `assert!(..eq(..))`.
    assert_eq!(*lock.lock(), NUM_ITERS * NUM_TASKS * 3);

    println!("Condvar wait test OK");
}

#[test]
fn test_wait_while() {
    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    let pair = Arc::new((Mutex::new(INIT_VALUE), Condvar::new()));
    // Spawn NUM_TASKS incrementers of 1 and NUM_TASKS incrementers of 2:
    // the counter's final value is NUM_ITERS * NUM_TASKS * 3.
    for _ in 0..NUM_TASKS {
        let pair1 = Arc::clone(&pair);
        thread::spawn(move || inc(1, pair1));
        let pair2 = Arc::clone(&pair);
        thread::spawn(move || inc(2, pair2));
    }

    let (lock, cvar) = &*pair;
    // `wait_while` blocks as long as the predicate returns `true`, i.e.
    // until the `u32` counter reaches its final value. (The old comment
    // incorrectly described the contents as a `Mutex<bool>`.)
    let val = cvar.wait_while(lock.lock(), |val| *val != NUM_ITERS * NUM_TASKS * 3);

    // `assert_eq!` prints both sides on failure, unlike `assert!(..eq(..))`.
    assert_eq!(*val, NUM_ITERS * NUM_TASKS * 3);

    println!("Condvar wait_while test OK");
}

/// Sanity check: notifying a condvar that has no waiters must be a
/// harmless no-op rather than a panic or a hang.
#[test]
fn smoke() {
    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    let cvar = Condvar::new();
    cvar.notify_one();
    cvar.notify_all();
}

/// A single task blocked in `wait` must be woken by `notify_one`.
#[test]
fn notify_one() {
    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    let mutex = Arc::new(Mutex::new(()));
    let mutex_child = mutex.clone();
    let cvar = Arc::new(Condvar::new());
    let cvar_child = cvar.clone();

    // Take the lock before spawning: the child cannot notify until this
    // task has released the mutex inside `wait`.
    let guard = mutex.lock();
    let _t = thread::spawn(move || {
        // Blocks until the parent's `wait` releases the mutex.
        let _g = mutex_child.lock();
        cvar_child.notify_one();
    });
    let guard = cvar.wait(guard);
    drop(guard);
}

/// All tasks blocked on the same condvar must be woken by `notify_all`.
/// The channel `tx`/`rx` is used twice per task: once to report that all
/// tasks have arrived, once to report that each task was woken.
#[test]
fn notify_all() {
    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    let data = Arc::new((Mutex::new(0), Condvar::new()));
    let (tx, rx) = channel();
    for _ in 0..NUM_TASKS {
        let data = data.clone();
        let tx = tx.clone();
        thread::spawn(move || {
            let &(ref lock, ref cond) = &*data;
            let mut cnt = lock.lock();
            *cnt += 1;
            // The last task to arrive signals the main task that everyone
            // has incremented the counter and is about to wait.
            if *cnt == NUM_TASKS {
                tx.send(()).unwrap();
            }
            // Wait until the main task resets the counter to 0.
            while *cnt != 0 {
                cnt = cond.wait(cnt);
            }
            tx.send(()).unwrap();
        });
    }
    // Drop the original sender so only the spawned tasks keep `tx` alive.
    drop(tx);

    let &(ref lock, ref cond) = &*data;
    // Yield manually to get tx.send() executed.
    thread::yield_now();
    rx.recv().unwrap();

    // Reset the counter while holding the lock, then wake every waiter.
    let mut cnt = lock.lock();
    *cnt = 0;
    cond.notify_all();
    drop(cnt);

    // Collect one wake-up acknowledgement per task.
    for _ in 0..NUM_TASKS {
        // Yield manually to get tx.send() executed.
        thread::yield_now();
        rx.recv().unwrap();
    }
}

/// `wait_while` must block until the predicate becomes false.
#[test]
fn wait_while() {
    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = pair.clone();

    // The spawned task flips the flag under the lock and signals the waiter.
    thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        let mut started = lock.lock();
        *started = true;
        // We notify the condvar that the value has changed.
        cvar.notify_one();
    });

    // Block until the flag has been set by the spawned task.
    let (lock, cvar) = &*pair;
    let guard = cvar.wait_while(lock.lock(), |started| !*started);
    assert!(*guard);
}
10 changes: 10 additions & 0 deletions modules/axsync/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,3 +42,13 @@ pub use self::rwlock::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
};
pub use semaphore::Semaphore;

// Shared infrastructure for every `#[cfg(test)]` module in this crate.
#[cfg(test)]
mod tests {
    use std::sync::{Mutex, Once};

    /// Used for initializing the only global scheduler for the test
    /// environment: each test calls `INIT.call_once(thread::init_scheduler)`
    /// so initialization happens exactly once per test binary.
    pub static INIT: Once = Once::new();
    /// Used for serializing the tests in this crate: each test holds this
    /// lock for its whole duration, since they all share one scheduler.
    pub static SEQ: Mutex<()> = Mutex::new(());
}
Loading

0 comments on commit 4fc3b8b

Please sign in to comment.