From e7b7c2e7c8ba69af29e154dbb0feecccd91bcb5a Mon Sep 17 00:00:00 2001
From: Ratakor
Date: Tue, 21 Nov 2023 11:14:17 +0100
Subject: [PATCH] Improve spinlock and sched.yield

---
 kernel/lib/lock.zig | 51 +++++++++++++++----------------------------------
 kernel/sched.zig    | 25 +++++++++++++------------
 2 files changed, 30 insertions(+), 46 deletions(-)

diff --git a/kernel/lib/lock.zig b/kernel/lib/lock.zig
index 1ad8647..6962004 100644
--- a/kernel/lib/lock.zig
+++ b/kernel/lib/lock.zig
@@ -1,51 +1,34 @@
-const std = @import("std");
-const builtin = @import("builtin");
+const atomic = @import("std").atomic;
 
+/// Test and test-and-set spinlock
 pub const SpinLock = struct {
-    state: State = State.init(unlocked),
+    state: State = State.init(.unlocked),
 
-    const State = std.atomic.Atomic(u32);
-    const unlocked = 0b00;
-    const locked = 0b01;
-    const contended = 0b11; // TODO: use contended for lockSlow / unlock
+    const State = atomic.Atomic(enum(u32) { unlocked, locked });
 
     /// return true on success
     pub inline fn tryLock(self: *SpinLock) bool {
-        return self.lockFast("compareAndSwap");
+        return if (self.isUnlocked()) self.lockImpl("compareAndSwap") else false;
     }
 
-    pub inline fn lock(self: *SpinLock) void {
-        if (!self.lockFast("tryCompareAndSwap")) {
-            self.lockSlow();
+    pub fn lock(self: *SpinLock) void {
+        while (!self.lockImpl("tryCompareAndSwap")) {
+            while (!self.isUnlocked()) {
+                atomic.spinLoopHint();
+            }
         }
     }
 
-    inline fn lockFast(self: *SpinLock, comptime cas_fn_name: []const u8) bool {
-        // optimization for x86
-        if (comptime builtin.target.cpu.arch.isX86()) {
-            const locked_bit = @ctz(@as(u32, locked));
-            return self.state.bitSet(locked_bit, .Acquire) == 0;
-        }
-
-        const casFn = @field(@TypeOf(self.state), cas_fn_name);
-        return casFn(&self.state, unlocked, locked, .Acquire, .Monotonic) == null;
+    pub inline fn unlock(self: *SpinLock) void {
+        self.state.store(.unlocked, .Release);
     }
 
-    noinline fn lockSlow(self: *SpinLock) void {
-        @setCold(true);
-
-        for (0..100_000_000) |_| {
-            if (self.lockFast("tryCompareAndSwap")) {
-                return;
-            }
-            std.atomic.spinLoopHint();
-        }
-
-        @panic("Deadlock");
+    inline fn isUnlocked(self: *SpinLock) bool {
+        return self.state.load(.Monotonic) == .unlocked;
     }
 
-    pub inline fn unlock(self: *SpinLock) void {
-        const state = self.state.swap(unlocked, .Release);
-        std.debug.assert(state != unlocked);
+    inline fn lockImpl(self: *SpinLock, comptime cas_fn_name: []const u8) bool {
+        const casFn = @field(@TypeOf(self.state), cas_fn_name);
+        return casFn(&self.state, .unlocked, .locked, .Acquire, .Monotonic) == null;
     }
 };
diff --git a/kernel/sched.zig b/kernel/sched.zig
index 774c584..15ece32 100644
--- a/kernel/sched.zig
+++ b/kernel/sched.zig
@@ -465,6 +465,19 @@ fn schedHandler(ctx: *arch.Context) callconv(.SysV) void {
     unreachable;
 }
 
+// TODO?: add comptime mode: .resched, .block (or .await), .die <- replace die()
+pub fn yield() noreturn {
+    std.debug.assert(arch.interruptState() == false);
+    // arch.disableInterrupts();
+
+    apic.timerStop();
+    smp.thisCpu().current_thread = null; // TODO: removing this line causes kernel panic in debug build
+    apic.sendIPI(undefined, .{ .vector = sched_vector, .destination_shorthand = .self });
+
+    arch.enableInterrupts();
+    arch.halt();
+}
+
 // TODO: wake is the same but reverse
 fn blockThread(thread: *Thread) void {
     dequeue(thread);
@@ -473,18 +486,6 @@ fn blockThread(thread: *Thread) void {
     sched_lock.unlock();
 }
 
-pub fn yield() noreturn {
-    arch.disableInterrupts();
-    apic.timerStop();
-
-    const cpu = smp.thisCpu();
-    cpu.current_thread = null;
-    apic.sendIPI(cpu.lapic_id, .{ .vector = sched_vector });
-
-    arch.enableInterrupts();
-    arch.halt();
-}
-
 // TODO
 pub fn yieldAwait() void {
     std.debug.assert(arch.interruptState() == false);
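
Reviewer note (not part of the patch): a minimal usage sketch of the new SpinLock
surface. It assumes the importing file can reach kernel/lib/lock.zig via the
relative path shown; `counter`, `increment`, and `tryIncrement` are hypothetical
names used only to exercise lock(), tryLock(), and unlock() as defined above.

    const SpinLock = @import("lib/lock.zig").SpinLock;

    var counter_lock: SpinLock = .{}; // defaults to .unlocked via State.init(.unlocked)
    var counter: u64 = 0; // hypothetical shared state guarded by counter_lock

    // Blocking path: lock() spins in the TTAS loop until the lock is acquired.
    fn increment() void {
        counter_lock.lock();
        defer counter_lock.unlock();
        counter += 1;
    }

    // Non-blocking path: tryLock() does a cheap relaxed load followed by a
    // single CAS and reports contention instead of spinning.
    fn tryIncrement() bool {
        if (!counter_lock.tryLock()) return false;
        defer counter_lock.unlock();
        counter += 1;
        return true;
    }

The point of the test and test-and-set shape in lock() is that waiters poll
with a .Monotonic load and only retry the .Acquire CAS once the lock looks
free, so a contended cache line is read in shared state rather than bounced
between cores by failed CAS attempts. On the sched side, yield() now asserts
that interrupts are already disabled, reschedules via a self-IPI shorthand
instead of looking up the local APIC id, and halts until sched_vector fires.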