std/sync/atomic: reimplement memory order options
mertcandav committed Dec 3, 2024
1 parent 577b2bf commit 9789d83
Showing 5 changed files with 53 additions and 55 deletions.
2 changes: 1 addition & 1 deletion std/runtime/atomic.jule
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD 3-Clause
// license that can be found in the LICENSE file.

type atomicMemoryOrder = int
type atomicMemoryOrder: int

const atomicRelaxed = atomicMemoryOrder(0)
const atomicConsume = atomicMemoryOrder(1)
84 changes: 41 additions & 43 deletions std/sync/atomic/atomic.jule
@@ -4,67 +4,65 @@

use "std/runtime"

// Memory order for atomic operations.
// Specifies how memory accesses are ordered.
enum MemoryOrder {
// The weakest memory order.
// There is no synchronization or ordering constraint on read/write access;
// only the operation itself is guaranteed to be atomic.
// Usually the fastest option for atomic operations.
Relaxed: runtime::atomicRelaxed,

// Combined with a load, if the loaded value was written
// by a store operation with a Release or stronger order,
// all subsequent operations are ordered after that store.
// In particular, all subsequent loads will see the data
// written before the store.
Acquire: runtime::atomicAcquire,

// When combined with a store, all previous operations are
// ordered before any load of that value with an Acquire or
// stronger order. In particular, all previous writes become
// visible to all threads that perform an Acquire or stronger
// load of this value.
Release: runtime::atomicRelease,

// Acquire and Release combined.
// Aka acquire/release.
// For loads it uses Acquire, for stores it uses Release ordering.
AcqRel: runtime::atomicAcqRel,

// Default memory order for most things.
// Aka sequentially consistent.
// Operations are sequenced consistently.
SeqCst: runtime::atomicSeqCst,
}
type memoryOrder = runtime::atomicMemoryOrder

// The weakest memory order.
// There is no synchronization or ordering constraint on read/write access;
// only the operation itself is guaranteed to be atomic.
// Usually the fastest option for atomic operations.
const Relaxed = memoryOrder(runtime::atomicRelaxed)

// Combined with a load, if the loaded value was written
// by a store operation with a Release or stronger order,
// all subsequent operations are ordered after that store.
// In particular, all subsequent loads will see the data
// written before the store.
const Acquire = memoryOrder(runtime::atomicAcquire)

// When combined with a store, all previous operations are
// ordered before any load of that value with an Acquire or
// stronger order. In particular, all previous writes become
// visible to all threads that perform an Acquire or stronger
// load of this value.
const Release = memoryOrder(runtime::atomicRelease)

// Acquire and Release combined.
// Aka acquire/release.
// For loads it uses Acquire, for stores it uses Release ordering.
const AcqRel = memoryOrder(runtime::atomicAcqRel)

// Default memory order for most things.
// Aka sequentially consistent.
// Operations are sequenced consistently.
const SeqCst = memoryOrder(runtime::atomicSeqCst)

struct number[T] {
n: T
}

impl number {
// Atomically stores new value and returns the previous value.
fn Swap(mut self, new: T, order: MemoryOrder): (old: T) {
fn Swap(mut self, new: T, order: memoryOrder): (old: T) {
ret unsafe { runtime::atomicSwap[T](&self.n, &new, order) }
}

// Executes the compare-and-swap operation.
fn CompareSwap(mut self, old: T, new: T, order: MemoryOrder): (swapped: bool) {
fn CompareSwap(mut self, old: T, new: T, order: memoryOrder): (swapped: bool) {
ret unsafe { runtime::atomicCompareSwap[T](&self.n, &old, &new, order, order) }
}

// Atomically adds delta to value and returns the previous value.
fn Add(mut self, delta: T, order: MemoryOrder): (old: T) {
fn Add(mut self, delta: T, order: memoryOrder): (old: T) {
ret unsafe { runtime::atomicAdd[T](&self.n, delta, order) }
}

// Atomically reads and returns value.
fn Load(self, order: MemoryOrder): T {
fn Load(self, order: memoryOrder): T {
ret unsafe { runtime::atomicLoad[T](&self.n, order) }
}

// Atomically assigns to value.
fn Store(mut self, val: T, order: MemoryOrder) {
fn Store(mut self, val: T, order: memoryOrder) {
unsafe { runtime::atomicStore[T](&self.n, &val, order) }
}
}
@@ -111,30 +109,30 @@ type Uintptr = number[uintptr]

// Atomically stores new into addr and returns the previous addr value.
// Only integer types are supported.
fn Swap[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, new: T, order: MemoryOrder): (old: T) {
fn Swap[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, new: T, order: memoryOrder): (old: T) {
ret unsafe { runtime::atomicSwap[T](&addr, &new, order) }
}

// Executes the compare-and-swap operation for value.
// Only integer types are supported.
fn CompareSwap[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, old: T, new: T, order: MemoryOrder): (swapped: bool) {
fn CompareSwap[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, old: T, new: T, order: memoryOrder): (swapped: bool) {
ret unsafe { runtime::atomicCompareSwap[T](&addr, &old, &new, order, order) }
}

// Atomically adds delta to addr and returns the previous addr value.
// Only integer types are supported.
fn Add[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, delta: T, order: MemoryOrder): (old: T) {
fn Add[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, delta: T, order: memoryOrder): (old: T) {
ret unsafe { runtime::atomicAdd[T](&addr, delta, order) }
}

// Atomically loads addr.
// Only integer types are supported.
fn Load[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](&addr: T, order: MemoryOrder): T {
fn Load[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](&addr: T, order: memoryOrder): T {
ret unsafe { runtime::atomicLoad[T](&addr, order) }
}

// Atomically stores val into addr.
// Only integer types are supported.
fn Store[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, val: T, order: MemoryOrder) {
fn Store[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, val: T, order: memoryOrder) {
unsafe { runtime::atomicStore[T](&addr, &val, order) }
}
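
For reference, here is how the new package-level order constants combine with the generic functions above at a call site. This is a minimal sketch, not part of the commit: the main function, variable names, and printed values are illustrative.

use "std/sync/atomic"

fn main() {
	// A plain integer; the generic functions take the address by reference.
	mut counter := u32(0)

	// Atomically add 1 and get the previous value back.
	old := atomic::Add(counter, u32(1), atomic::SeqCst)
	outln(old) // 0

	// Publish a flag with Release, observe it with Acquire.
	mut ready := u32(0)
	atomic::Store(ready, u32(1), atomic::Release)
	if atomic::Load(ready, atomic::Acquire) == 1 {
		// Writes made before the Store above are visible here.
		outln("ready")
	}

	// Compare-and-swap: set counter to 10 only if it is still 1.
	swapped := atomic::CompareSwap(counter, u32(1), u32(10), atomic::SeqCst)
	outln(swapped) // true
}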
4 changes: 2 additions & 2 deletions std/sync/mutex.jule
@@ -36,7 +36,7 @@ impl Mutex {
// Unlocks the mutex, making it available
// for locking by other threads.
fn Unlock(self) {
old := atomic::Add(self.state, -mutexLocked, atomic::MemoryOrder.SeqCst)
old := atomic::Add(self.state, -mutexLocked, atomic::SeqCst)
if old == 0 {
panic("sync: Mutex: unlock of unlocked mutex")
}
@@ -47,6 +47,6 @@ impl Mutex {
// to lock. Returns true if the locking was
// successful, false otherwise.
fn TryLock(self): bool {
ret atomic::CompareSwap(self.state, mutexUnlocked, mutexLocked, atomic::MemoryOrder.SeqCst)
ret atomic::CompareSwap(self.state, mutexUnlocked, mutexLocked, atomic::SeqCst)
}
}
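
The two hunks above show the whole locking pattern: a SeqCst compare-and-swap to take the lock and a SeqCst arithmetic update to release it. As a standalone illustration of that pattern (not the actual Mutex implementation; the SpinLock name and i32 state are invented for the sketch):

use "std/sync/atomic"

// Illustrative lock built on the same primitives Mutex uses above.
struct SpinLock {
	state: i32 // 0 = unlocked, 1 = locked
}

impl SpinLock {
	// Busy-waits until the state flips from unlocked (0) to locked (1).
	fn Lock(mut self) {
		for !atomic::CompareSwap(self.state, i32(0), i32(1), atomic::SeqCst) {
		}
	}

	// Releases the lock; panics if it was not held, mirroring Mutex.Unlock.
	fn Unlock(mut self) {
		if atomic::Swap(self.state, i32(0), atomic::SeqCst) == 0 {
			panic("spinlock: unlock of unlocked lock")
		}
	}
}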
6 changes: 3 additions & 3 deletions std/sync/once.jule
@@ -90,17 +90,17 @@ impl Once {
// This is why the slow path falls back to a mutex, and why
// the self.done.Store must be delayed until after f returns.

if self.done.Load(atomic::MemoryOrder.Relaxed) == 0 {
if self.done.Load(atomic::Relaxed) == 0 {
// Outlined slow-path to allow inlining of the fast-path.
self.doSlow(f)
}
}

fn doSlow(self, f: fn()) {
self.m.Lock()
if self.done.Load(atomic::MemoryOrder.Relaxed) == 0 {
if self.done.Load(atomic::Relaxed) == 0 {
f()
self.done.Store(1, atomic::MemoryOrder.Relaxed)
self.done.Store(1, atomic::Relaxed)
}
self.m.Unlock()
}
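
The fast path above is the usual double-checked pattern: a Relaxed load skips the mutex once initialization has been observed, and the mutex-guarded doSlow performs the real check and the delayed Store. A hedged usage sketch, assuming the public entry point wrapping doSlow is a Do(f: fn()) method and that a zero-value Once literal is usable (neither is shown in this hunk):

use "std/sync"

fn main() {
	mut once := sync::Once{} // assumption: zero-value literal is valid

	// The callback runs at most once, however many times Do is called.
	once.Do(fn() { outln("initialized") })
	once.Do(fn() { outln("initialized") }) // prints nothing; f already ran
}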
12 changes: 6 additions & 6 deletions std/sync/waitgroup.jule
@@ -28,7 +28,7 @@ impl WaitGroup {
// and unblocks any wait() calls if task count becomes zero.
// Panics if task count reaches below zero.
fn Add(mut self, delta: int) {
oldTask := int(self.taskN.Add(u32(delta), atomic::MemoryOrder.Relaxed))
oldTask := int(self.taskN.Add(u32(delta), atomic::Relaxed))
nTask := oldTask + delta
if nTask < 0 {
panic("sync: WaitGroup.Add: negative number of tasks")
@@ -42,12 +42,12 @@

// Number of tasks reaches to zero, therefore clear waiters.
for {
nWaiters := self.waitN.Load(atomic::MemoryOrder.Relaxed)
nWaiters := self.waitN.Load(atomic::Relaxed)
if nWaiters == 0 {
ret
}

if self.waitN.CompareSwap(nWaiters, 0, atomic::MemoryOrder.Relaxed) {
if self.waitN.CompareSwap(nWaiters, 0, atomic::Relaxed) {
ret
}
}
@@ -58,17 +58,17 @@

// Blocks until all tasks are done (task count becomes zero)
fn Wait(mut self) {
nTask := self.taskN.Load(atomic::MemoryOrder.Relaxed)
nTask := self.taskN.Load(atomic::Relaxed)
if nTask == 0 {
// No task, no need to wait.
ret
}

// Register this wait call to waiters.
self.waitN.Add(1, atomic::MemoryOrder.Relaxed)
self.waitN.Add(1, atomic::Relaxed)

// Wait for clearing waiters.
for self.waitN.Load(atomic::MemoryOrder.Relaxed) != 0 {
for self.waitN.Load(atomic::Relaxed) != 0 {
}
}
}
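
The counters above carry the whole protocol: Add moves the task count, a zero crossing clears the waiter count through the CAS loop, and Wait registers itself and then spins on that count. A hedged call-site sketch (the zero-value literal is an assumption, and in real use the decrements would come from concurrently running tasks rather than straight-line code):

use "std/sync"

fn main() {
	mut wg := sync::WaitGroup{} // assumption: zero-value literal is valid

	wg.Add(2)  // register two pending tasks

	// In real code these would run concurrently and call Add(-1) when done.
	wg.Add(-1) // first task finished
	wg.Add(-1) // second task finished; count hits zero, waiters are released

	wg.Wait()  // returns immediately: the task count is already zero
}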
