diff --git a/std/runtime/atomic.jule b/std/runtime/atomic.jule
index 0bf83907..f122a447 100644
--- a/std/runtime/atomic.jule
+++ b/std/runtime/atomic.jule
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD 3-Clause
 // license that can be found in the LICENSE file.
 
-type atomicMemoryOrder = int
+type atomicMemoryOrder: int
 
 const atomicRelaxed = atomicMemoryOrder(0)
 const atomicConsume = atomicMemoryOrder(1)
diff --git a/std/sync/atomic/atomic.jule b/std/sync/atomic/atomic.jule
index 4ff7e572..ba031e02 100644
--- a/std/sync/atomic/atomic.jule
+++ b/std/sync/atomic/atomic.jule
@@ -4,39 +4,37 @@
 
 use "std/runtime"
 
-// Memory order for atomic operations.
-// Specifies how memory accesses.
-enum MemoryOrder {
-	// The weakest memory order.
-	// There no synchronization or ordering on read/write access.
-	// Only the operation is guaranteed to be atomic.
-	// Usually performs fastest atomicity performance.
-	Relaxed: runtime::atomicRelaxed,
-
-	// Combined with a load, if the loaded value was written
-	// by a store operation with a Release or stronger order,
-	// all subsequent operations are ordered after that store.
-	// Especially all subsequent uploads will see the data
-	// written before the repository.
-	Acquire: runtime::atomicAcquire,
-
-	// When combined with a store, all previous operations are
-	// ordered with the Acquire or stronger order before any load
-	// of that value. In particular, all previous writes become
-	// visible to all threads that perform an Acquire or stronger
-	// load of this value.
-	Release: runtime::atomicRelease,
-
-	// Acquire and Release combined.
-	// Aka acquire/release.
-	// For loads it uses Acquire, for stores it uses Release ordering.
-	AcqRel: runtime::atomicAcqRel,
-
-	// Default memory order for most things.
-	// Aka sequentially consistent.
-	// Operations are sequenced consistently.
-	SeqCst: runtime::atomicSeqCst,
-}
+type memoryOrder = runtime::atomicMemoryOrder
+
+// The weakest memory order.
+// There is no synchronization or ordering on read/write access.
+// Only the operation is guaranteed to be atomic.
+// Usually provides the fastest atomic performance.
+const Relaxed = memoryOrder(runtime::atomicRelaxed)
+
+// Combined with a load, if the loaded value was written
+// by a store operation with a Release or stronger order,
+// all subsequent operations are ordered after that store.
+// In particular, all subsequent loads will see the data
+// written before that store.
+const Acquire = memoryOrder(runtime::atomicAcquire)
+
+// When combined with a store, all previous operations are
+// ordered before any Acquire or stronger load of that
+// value. In particular, all previous writes become
+// visible to all threads that perform an Acquire or stronger
+// load of this value.
+const Release = memoryOrder(runtime::atomicRelease)
+
+// Acquire and Release combined.
+// Aka acquire/release.
+// For loads it uses Acquire, for stores it uses Release ordering.
+const AcqRel = memoryOrder(runtime::atomicAcqRel)
+
+// Default memory order for most things.
+// Aka sequentially consistent.
+// Operations are sequenced consistently.
+const SeqCst = memoryOrder(runtime::atomicSeqCst)
 
 struct number[T] {
 	n: T
@@ -44,27 +42,27 @@ struct number[T] {
 
 impl number {
 	// Atomically stores new value and returns the previous value.
-	fn Swap(mut self, new: T, order: MemoryOrder): (old: T) {
+	fn Swap(mut self, new: T, order: memoryOrder): (old: T) {
 		ret unsafe { runtime::atomicSwap[T](&self.n, &new, order) }
 	}
 
 	// Executes the compare-and-swap operation.
-	fn CompareSwap(mut self, old: T, new: T, order: MemoryOrder): (swapped: bool) {
+	fn CompareSwap(mut self, old: T, new: T, order: memoryOrder): (swapped: bool) {
 		ret unsafe { runtime::atomicCompareSwap[T](&self.n, &old, &new, order, order) }
 	}
 
 	// Atomically adds delta to value and returns the previous value.
-	fn Add(mut self, delta: T, order: MemoryOrder): (old: T) {
+	fn Add(mut self, delta: T, order: memoryOrder): (old: T) {
 		ret unsafe { runtime::atomicAdd[T](&self.n, delta, order) }
 	}
 
 	// Atomically reads and returns value.
-	fn Load(self, order: MemoryOrder): T {
+	fn Load(self, order: memoryOrder): T {
 		ret unsafe { runtime::atomicLoad[T](&self.n, order) }
 	}
 
 	// Atomically assigns to value.
-	fn Store(mut self, val: T, order: MemoryOrder) {
+	fn Store(mut self, val: T, order: memoryOrder) {
 		unsafe { runtime::atomicStore[T](&self.n, &val, order) }
 	}
 }
@@ -111,30 +109,30 @@ type Uintptr = number[uintptr]
 
 // Atomically stores new into addr and returns the previous addr value.
 // Only integer types are supported.
-fn Swap[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, new: T, order: MemoryOrder): (old: T) {
+fn Swap[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, new: T, order: memoryOrder): (old: T) {
 	ret unsafe { runtime::atomicSwap[T](&addr, &new, order) }
 }
 
 // Executes the compare-and-swap operation for value.
 // Only integer types are supported.
-fn CompareSwap[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, old: T, new: T, order: MemoryOrder): (swapped: bool) {
+fn CompareSwap[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, old: T, new: T, order: memoryOrder): (swapped: bool) {
 	ret unsafe { runtime::atomicCompareSwap[T](&addr, &old, &new, order, order) }
 }
 
 // Atomically adds delta to addr and returns the previous addr value.
 // Only integer types are supported.
-fn Add[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, delta: T, order: MemoryOrder): (old: T) {
+fn Add[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, delta: T, order: memoryOrder): (old: T) {
 	ret unsafe { runtime::atomicAdd[T](&addr, delta, order) }
 }
 
 // Atomically loads addr.
 // Only integer types are supported.
-fn Load[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](&addr: T, order: MemoryOrder): T {
+fn Load[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](&addr: T, order: memoryOrder): T {
 	ret unsafe { runtime::atomicLoad[T](&addr, order) }
 }
 
 // Atomically stores val into addr.
 // Only integer types are supported.
-fn Store[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, val: T, order: MemoryOrder) {
+fn Store[T: int | uint | i8 | i16 | i32 | i64 | u8 | u16 | u32 | u64 | uintptr](mut &addr: T, val: T, order: memoryOrder) {
 	unsafe { runtime::atomicStore[T](&addr, &val, order) }
 }
\ No newline at end of file
diff --git a/std/sync/mutex.jule b/std/sync/mutex.jule
index 7aec2007..873ea2c9 100644
--- a/std/sync/mutex.jule
+++ b/std/sync/mutex.jule
@@ -36,7 +36,7 @@ impl Mutex {
 	// Unlock the mutex you locked and make it open
 	// to locking by the thread.
 	fn Unlock(self) {
-		old := atomic::Add(self.state, -mutexLocked, atomic::MemoryOrder.SeqCst)
+		old := atomic::Add(self.state, -mutexLocked, atomic::SeqCst)
 		if old == 0 {
 			panic("sync: Mutex: unlock of unlocked mutex")
 		}
@@ -47,6 +47,6 @@
 	// to lock. Returns true if the locking was
 	// successful, false otherwise.
 	fn TryLock(self): bool {
-		ret atomic::CompareSwap(self.state, mutexUnlocked, mutexLocked, atomic::MemoryOrder.SeqCst)
+		ret atomic::CompareSwap(self.state, mutexUnlocked, mutexLocked, atomic::SeqCst)
 	}
 }
\ No newline at end of file
diff --git a/std/sync/once.jule b/std/sync/once.jule
index c45fa4c3..1b19c7a1 100644
--- a/std/sync/once.jule
+++ b/std/sync/once.jule
@@ -90,7 +90,7 @@ impl Once {
 		// This is why the slow path falls back to a mutex, and why
 		// the self.done.store must be delayed until after f returns.
 
-		if self.done.Load(atomic::MemoryOrder.Relaxed) == 0 {
+		if self.done.Load(atomic::Relaxed) == 0 {
 			// Outlined slow-path to allow inlining of the fast-path.
 			self.doSlow(f)
 		}
@@ -98,9 +98,9 @@
 
 	fn doSlow(self, f: fn()) {
 		self.m.Lock()
-		if self.done.Load(atomic::MemoryOrder.Relaxed) == 0 {
+		if self.done.Load(atomic::Relaxed) == 0 {
 			f()
-			self.done.Store(1, atomic::MemoryOrder.Relaxed)
+			self.done.Store(1, atomic::Relaxed)
 		}
 		self.m.Unlock()
 	}
diff --git a/std/sync/waitgroup.jule b/std/sync/waitgroup.jule
index 6b585786..1290ebb4 100644
--- a/std/sync/waitgroup.jule
+++ b/std/sync/waitgroup.jule
@@ -28,7 +28,7 @@ impl WaitGroup {
 	// and unblocks any wait() calls if task count becomes zero.
 	// Panics if task count reaches below zero.
 	fn Add(mut self, delta: int) {
-		oldTask := int(self.taskN.Add(u32(delta), atomic::MemoryOrder.Relaxed))
+		oldTask := int(self.taskN.Add(u32(delta), atomic::Relaxed))
 		nTask := oldTask + delta
 		if nTask < 0 {
 			panic("sync: WaitGroup.Add: negative number of tasks")
@@ -42,12 +42,12 @@
 
 		// Number of tasks reaches to zero, therefore clear waiters.
 		for {
-			nWaiters := self.waitN.Load(atomic::MemoryOrder.Relaxed)
+			nWaiters := self.waitN.Load(atomic::Relaxed)
 			if nWaiters == 0 {
 				ret
 			}
 
-			if self.waitN.CompareSwap(nWaiters, 0, atomic::MemoryOrder.Relaxed) {
+			if self.waitN.CompareSwap(nWaiters, 0, atomic::Relaxed) {
 				ret
 			}
 		}
@@ -58,17 +58,17 @@
 
 	// Blocks until all tasks are done (task count becomes zero)
 	fn Wait(mut self) {
-		nTask := self.taskN.Load(atomic::MemoryOrder.Relaxed)
+		nTask := self.taskN.Load(atomic::Relaxed)
 		if nTask == 0 {
 			// No task, no need to wait.
 			ret
 		}
 
 		// Register this wait call to waiters.
-		self.waitN.Add(1, atomic::MemoryOrder.Relaxed)
+		self.waitN.Add(1, atomic::Relaxed)
 
 		// Wait for clearing waiters.
-		for self.waitN.Load(atomic::MemoryOrder.Relaxed) != 0 {
+		for self.waitN.Load(atomic::Relaxed) != 0 {
 		}
 	}
 }
\ No newline at end of file
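
Usage note (not part of the patch): after this change, memory orderings are passed to std/sync/atomic as the package-level constants atomic::Relaxed, atomic::SeqCst, and so on, instead of the removed enum members such as atomic::MemoryOrder.SeqCst. A minimal call-site sketch follows; the variable names, the mut short declaration, and the builtin println are illustrative assumptions, not taken from the patch.

use "std/sync/atomic"

fn main() {
	// Plain integer accessed through the package-level generic functions.
	mut counter := 0
	atomic::Store(counter, 42, atomic::SeqCst)       // was atomic::MemoryOrder.SeqCst
	old := atomic::Add(counter, 10, atomic::Relaxed) // returns the previous value (42)
	now := atomic::Load(counter, atomic::SeqCst)     // 52
	println(old)
	println(now)
}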