Skip to content

Commit

Permalink
Update limine + fix pid + start user threads
Browse files Browse the repository at this point in the history
  • Loading branch information
Ratakor committed Oct 24, 2023
1 parent 9da0050 commit 9ffc718
Show file tree
Hide file tree
Showing 6 changed files with 213 additions and 50 deletions.
12 changes: 9 additions & 3 deletions kernel/arch/x86_64/cpu.zig
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,8 @@ const gdt = @import("gdt.zig");
const idt = @import("idt.zig");
const apic = @import("apic.zig");
const log = std.log.scoped(.cpu);
const page_size = std.mem.page_size;

// TODO: move all of this in x86_64.zig?

pub const CpuLocal = struct {
id: usize,
Expand All @@ -22,6 +23,7 @@ pub const CpuLocal = struct {
tss: gdt.TSS,

pub const stack_size = 0x10000; // 64KiB
pub const stack_pages = stack_size / std.mem.page_size;

pub fn initCpu(self: *CpuLocal, is_bsp: bool) void {
gdt.reload();
Expand All @@ -38,11 +40,11 @@ pub const CpuLocal = struct {
self.idle_thread = idle_thread;
x86.setGsBase(@intFromPtr(idle_thread));

const common_int_stack_phys = pmm.alloc(stack_size / page_size, true) orelse unreachable;
const common_int_stack_phys = pmm.alloc(stack_pages, true) orelse unreachable;
const common_int_stack = common_int_stack_phys + stack_size + vmm.hhdm_offset;
self.tss.rsp0 = common_int_stack;

const sched_stack_phys = pmm.alloc(stack_size / page_size, true) orelse unreachable;
const sched_stack_phys = pmm.alloc(stack_pages, true) orelse unreachable;
const sched_stack = sched_stack_phys + stack_size + vmm.hhdm_offset;
self.tss.ist1 = sched_stack;

Expand Down Expand Up @@ -211,6 +213,8 @@ const XCR0 = enum(u64) {

pub var use_xsave = false;
pub var fpu_storage_size: usize = 512; // 512 = fxsave storage
// TODO: replace with @divCeil
pub var fpu_storage_pages: usize = 1;

inline fn hasFeature(features: u64, feat: Feature) bool {
return features & @intFromEnum(feat) != 0;
Expand Down Expand Up @@ -264,6 +268,8 @@ fn initFeatures(is_bsp: bool) void {
x86.wrxcr(0, xcr0);

fpu_storage_size = x86.cpuid(0xd, 0).ecx;
// TODO: replace with @divCeil
fpu_storage_pages = std.math.divCeil(usize, fpu_storage_size, std.mem.page_size) catch unreachable;
}

// TODO
Expand Down
8 changes: 4 additions & 4 deletions kernel/linker-x86_64.ld
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@ ENTRY(_start)
/* MMU permissions */
PHDRS
{
text PT_LOAD FLAGS((1 << 0) | (1 << 2)) ; /* Execute + Read */
rodata PT_LOAD FLAGS((1 << 2)) ; /* Read only */
data PT_LOAD FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */
dynamic PT_DYNAMIC FLAGS((1 << 1) | (1 << 2)) ; /* Dynamic PHDR for relocations */
text PT_LOAD FLAGS((1 << 0) | (1 << 2)); /* Execute + Read */
rodata PT_LOAD FLAGS((1 << 2)); /* Read only */
data PT_LOAD FLAGS((1 << 1) | (1 << 2)); /* Write + Read */
dynamic PT_DYNAMIC FLAGS((1 << 1) | (1 << 2)); /* Dynamic PHDR for relocations */
}

SECTIONS
Expand Down
2 changes: 1 addition & 1 deletion kernel/main.zig
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ fn main() noreturn {
for (resp.modules()) |module| {
std.log.debug("file \"{s}\" contains \"{s}\"", .{ module.path, module.data() });
// std.log.debug("loading {s}", .{module.cmdline});
// const thr = sched.Thread.initKernel(@ptrCast(&module.address), null, 1) catch unreachable;
// const thr = sched.Thread.initUser(
// sched.enqueue(thr) catch unreachable;
}
}
Expand Down
184 changes: 150 additions & 34 deletions kernel/sched.zig
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ const pmm = @import("pmm.zig");
const vmm = @import("vmm.zig");
const SpinLock = @import("SpinLock.zig");
const log = std.log.scoped(.sched);
const page_size = std.mem.page_size;

// TODO: if a process create a lot of thread it can suck all the cpu
// -> check the process of the chosen thread smh to fix that
Expand All @@ -23,7 +22,7 @@ pub const Process = struct {
parent: ?*Process,
addr_space: *vmm.AddressSpace,
// mmap_anon_base: usize,
// thread_stack_top: usize,
thread_stack_top: usize, // TODO
threads: std.ArrayListUnmanaged(*Thread),
children: std.ArrayListUnmanaged(*Process),
// child_events
Expand All @@ -36,11 +35,13 @@ pub const Process = struct {

// running_time: usize,

var next_pid: u32 = 0;

pub fn init(parent: ?*Process, addr_space: ?*vmm.AddressSpace) !*Process {
const proc = try root.allocator.create(Process);
errdefer root.allocator.destroy(proc);

proc.id = @intCast(processes.items.len); // TODO: wrong
proc.id = next_pid;
proc.parent = parent;
proc.threads = .{};
proc.children = .{};
Expand All @@ -65,9 +66,15 @@ pub const Process = struct {
}

try processes.append(root.allocator, proc);
next_pid += 1;

return proc;
}

/// Tear down a Process and release its resources.
/// Currently a stub: nothing is freed yet (threads, children, address
/// space, and the `processes` list entry all leak). The `_ = self;`
/// discard only silences the unused-parameter error until the real
/// cleanup is written.
pub fn deinit(self: *Process) void {
    _ = self;
    // TODO
}
};

pub const Thread = struct {
Expand All @@ -85,7 +92,6 @@ pub const Thread = struct {
scheduling_off: bool, // TODO: for TLB
enqueued: bool,
// enqueued_by_signal: bool, // TODO: for events
timeslice: u32, // in microseconds
yield_await: SpinLock = .{},
gs_base: u64,
fs_base: u64,
Expand All @@ -102,52 +108,44 @@ pub const Thread = struct {
// running_time: usize,

pub const stack_size = 1024 * 1024; // 1MiB
pub const stack_pages = stack_size / page_size;
pub const stack_pages = stack_size / std.mem.page_size;

// TODO: improve
// use root.allocator instead of pmm.alloc?
pub fn initKernel(func: *const anyopaque, arg: ?*anyopaque, tickets: usize) !*Thread {
const thread = try root.allocator.create(Thread);
errdefer root.allocator.destroy(thread);
errdefer thread.deinit();
@memset(std.mem.asBytes(thread), 0);

thread.self = thread;
thread.process = kernel_process;
thread.tickets = tickets;
// thread.tid = undefined; // normal
thread.lock = .{};
thread.yield_await = .{};
thread.stacks = .{};

const stack_phys = pmm.alloc(stack_pages, true) orelse {
root.allocator.destroy(thread);
return error.OutOfMemory;
};
try thread.stacks.append(root.allocator, stack_phys);
const stack = stack_phys + stack_size + vmm.hhdm_offset;

thread.ctx.cs = gdt.kernel_code;
thread.ctx.ds = gdt.kernel_data;
thread.ctx.es = gdt.kernel_data;
thread.ctx.ss = gdt.kernel_data;
thread.ctx.rflags = @bitCast(arch.Rflags{ .IF = 1 });
thread.ctx.rip = @intFromPtr(func);
thread.ctx.rdi = @intFromPtr(arg);
thread.ctx.rsp = stack;
thread.ctx.rsp = blk: {
const stack_phys = pmm.alloc(stack_pages, true) orelse return error.OutOfMemory;
errdefer pmm.free(stack_phys, stack_pages);
try thread.stacks.append(root.allocator, stack_phys);
break :blk stack_phys + stack_size + vmm.hhdm_offset;
};

thread.cr3 = kernel_process.addr_space.cr3();
thread.gs_base = @intFromPtr(thread);

thread.process = kernel_process;
thread.timeslice = 1000;
// TODO: calculate this once and store it in arch.cpu.fpu_storage_pages
const pages = std.math.divCeil(u64, arch.cpu.fpu_storage_size, page_size) catch unreachable;
const fpu_storage_phys = pmm.alloc(pages, true) orelse {
pmm.free(stack_phys, stack_pages);
root.allocator.destroy(thread);
return error.OutOfMemory;
};
const fpu_storage_phys = pmm.alloc(arch.cpu.fpu_storage_pages, true) orelse return error.OutOfMemory;
thread.fpu_storage = fpu_storage_phys + vmm.hhdm_offset;

// TODO
// kernel_process.threads.append(root.allocator, thread);

std.debug.assert(@intFromPtr(thread) == @intFromPtr(&thread.self));
Expand All @@ -157,30 +155,148 @@ pub const Thread = struct {
}

// TODO: idk if this should be in kernel, it is pthread
pub fn initUser() !*Thread {
@compileError("TODO");
// TODO: idk if this should be in kernel, it is pthread
/// Create a user-mode thread for `process`.
///
/// WORK IN PROGRESS: the function ends in `@compileError("not finished
/// yet")`, so it must not be referenced yet (see the commented-out call
/// site in main.zig).
///
/// - `func`: user-space entry point loaded into RIP.
/// - `args`: opaque argument passed in RDI.
/// - `stack_ptr`: caller-provided user stack top, or null to allocate a
///   fresh stack and map it below `process.thread_stack_top`.
/// - `argv`/`environ`: copied onto the first thread's stack (SysV-style).
/// - `tickets`: weight for the lottery scheduler.
pub fn initUser(
    process: *Process,
    func: *const anyopaque,
    args: ?*anyopaque,
    stack_ptr: ?*anyopaque,
    argv: [][*:0]u8,
    environ: [][*:0]u8,
    tickets: usize,
) !*Thread {
    const thread = try root.allocator.create(Thread);
    // NOTE(review): `thread.deinit()` walks `thread.stacks`, which is only
    // zeroed by the memset below — an error between create() and memset
    // would run deinit on garbage; verify ordering is safe.
    errdefer thread.deinit();
    // Zero every field so deinit/cleanup sees well-defined state.
    @memset(std.mem.asBytes(thread), 0);

    thread.self = thread;
    thread.process = process;
    thread.tickets = tickets;
    thread.lock = .{};
    thread.enqueued = false;
    thread.yield_await = .{};
    thread.stacks = .{};

    // TODO: ugly + save in threads.stacks so it's easily freed on deinit?
    // `stack` is the HHDM (kernel-visible) address used below to copy
    // argv/environ; `stack_vma` is the user-space virtual address that
    // goes into the saved RSP.
    var stack: u64 = undefined;
    var stack_vma: u64 = undefined;
    if (stack_ptr) |sp| {
        // Caller supplied the stack: assumed already mapped in the
        // process address space (so VMA == the pointer itself).
        stack = @intFromPtr(sp);
        stack_vma = @intFromPtr(sp);
    } else {
        // Allocate a fresh user stack and map it just below the
        // process's current thread-stack ceiling.
        const stack_phys = pmm.alloc(stack_pages, true) orelse return error.OutOfMemory;
        errdefer pmm.free(stack_phys, stack_pages);

        stack = stack_phys + stack_size + vmm.hhdm_offset;
        stack_vma = process.thread_stack_top;
        try process.addr_space.mmapRange(
            process.thread_stack_top - stack_size,
            stack_phys,
            stack_size,
            vmm.PROT.READ | vmm.PROT.WRITE,
            vmm.MAP.ANONYMOUS,
        );
        // NOTE(review): this subtracts `stack_size - page_size`, leaving
        // the next stack adjacent except for one page — presumably meant
        // as a guard gap, but written this way there is NO unmapped page
        // between stacks; confirm intent (`-= stack_size + page_size`?).
        process.thread_stack_top -= stack_size - std.mem.page_size;
    }

    // Kernel stack used on ring3 -> ring0 transitions (syscall/interrupt).
    thread.kernel_stack = blk: {
        const stack_phys = pmm.alloc(stack_pages, true) orelse return error.OutOfMemory;
        errdefer pmm.free(stack_phys, stack_pages);
        try thread.stacks.append(root.allocator, stack_phys);
        break :blk stack_phys + stack_size + vmm.hhdm_offset;
    };

    // Separate stack for page-fault handling.
    thread.pf_stack = blk: {
        const stack_phys = pmm.alloc(stack_pages, true) orelse return error.OutOfMemory;
        errdefer pmm.free(stack_phys, stack_pages);
        try thread.stacks.append(root.allocator, stack_phys);
        break :blk stack_phys + stack_size + vmm.hhdm_offset;
    };

    // Initial user-mode register context: ring3 selectors, interrupts
    // enabled, entry point in RIP, argument in RDI (SysV first arg).
    thread.ctx.cs = gdt.user_code;
    thread.ctx.ds = gdt.user_data;
    thread.ctx.es = gdt.user_data;
    thread.ctx.ss = gdt.user_data;
    thread.ctx.rflags = @bitCast(arch.Rflags{ .IF = 1 });
    thread.ctx.rip = @intFromPtr(func);
    thread.ctx.rdi = @intFromPtr(args);
    thread.ctx.rsp = stack_vma;
    thread.cr3 = process.addr_space.cr3();

    // NOTE(review): fpu_storage leaks the pages allocated above on this
    // error path (kernel/pf stacks are in thread.stacks, this isn't).
    const fpu_storage_phys = pmm.alloc(arch.cpu.fpu_storage_pages, true) orelse return error.OutOfMemory;
    thread.fpu_storage = fpu_storage_phys + vmm.hhdm_offset;

    // TODO: set up FPU control word and MXCSR based on SysV ABI
    // Load the (zeroed) storage into the FPU, program default control
    // words, then save the result back so the first context switch
    // restores sane FPU state.
    arch.cpu.fpuRestore(thread.fpu_storage);
    // 0x33F: all x87 exceptions masked, extended precision — NOTE(review):
    // SysV ABI documents the default FCW as 0x37F; confirm the value.
    const default_fcw: u16 = 0b1100111111;
    asm volatile (
        \\fldcw %[fcw]
        :
        : [fcw] "m" (default_fcw),
        : "memory"
    );
    // 0x1F80: SSE default — all exceptions masked, round-to-nearest.
    const default_mxcsr: u32 = 0b1111110000000;
    asm volatile (
        \\ldmxcsr %[mxcsr]
        :
        : [mxcsr] "m" (default_mxcsr),
        : "memory"
    );
    arch.cpu.fpuSave(thread.fpu_storage);

    thread.tid = process.threads.items.len; // TODO?

    // First thread of the process: push environ/argv strings onto its
    // stack (System V-style process startup layout).
    if (process.threads.items.len == 0) {
        const stack_top = stack;
        _ = stack_top;

        for (environ) |entry| {
            const len = std.mem.len(entry);
            stack = stack - len - 1;
            // NOTE(review): `@as(u8, @ptrFromInt(stack))` cannot compile —
            // destination should be a slice, e.g.
            // `@as([*]u8, @ptrFromInt(stack))[0..len]`. Unreached today
            // only because of the @compileError at the end.
            @memcpy(@as(u8, @ptrFromInt(stack)), entry[0..len]);
        }

        for (argv) |arg| {
            const len = std.mem.len(arg);
            stack = stack - len - 1;
            // NOTE(review): same invalid cast as the environ loop above.
            @memcpy(@as(u8, @ptrFromInt(stack)), arg[0..len]);
        }

        // TODO
        // 16-byte align RSP, then drop one extra word if the pointer
        // count (argv + environ + argc? — confirm) is odd, keeping the
        // final alignment the ABI expects at entry.
        stack = std.mem.alignBackward(stack, 16);
        if ((argv.len + environ.len + 1) & 1 != 0) {
            stack -= @sizeOf(u64);
        }

        // TODO: elf
        // TODO: stuff
    }

    try process.threads.append(root.allocator, thread);

    // Hard stop: argv/envp pointer arrays + auxv are not pushed yet.
    @compileError("not finished yet");
    // return thread;
}

// TODO
pub fn deinit(self: *Thread) void {
for (self.stacks.items) |stack| {
pmm.free(stack, stack_pages);
}
// TODO: calculate this once and store it in arch.cpu.fpu_storage_pages
const pages = std.math.divCeil(u64, arch.cpu.fpu_storage_size, page_size) catch unreachable;
pmm.free(self.fpu_storage - vmm.hhdm_offset, pages);
self.stacks.deinit(root.allocator);
pmm.free(self.fpu_storage - vmm.hhdm_offset, arch.cpu.fpu_storage_pages);
root.allocator.destroy(self);
}
};

pub const timeslice = 1000; // reschedule every 1ms

pub var kernel_process: *Process = undefined;
pub var processes: std.ArrayListUnmanaged(*Process) = .{};
var sched_vector: u8 = undefined;
var running_threads: std.ArrayListUnmanaged(*Thread) = .{};
var sched_lock: SpinLock = .{};
var total_tickets: usize = 0;
var pcg: rand.Pcg = undefined;
const random: rand.Random = pcg.random();

pub fn init() void {
pcg = rand.Pcg.init(rand.getSeedSlow());
Expand All @@ -200,7 +316,7 @@ pub inline fn currentThread() *Thread {

// O(n)
fn nextThread() ?*Thread {
const ticket = random.uintLessThan(usize, total_tickets + 1);
const ticket = pcg.random().uintLessThan(usize, total_tickets + 1);
var sum: usize = 0;

sched_lock.lock();
Expand Down Expand Up @@ -273,7 +389,7 @@ fn schedHandler(ctx: *arch.Context) callconv(.SysV) void {
// TODO
if (current_thread.scheduling_off) {
apic.eoi();
apic.timerOneShot(current_thread.timeslice, sched_vector);
apic.timerOneShot(timeslice, sched_vector);
return;
}

Expand All @@ -286,7 +402,7 @@ fn schedHandler(ctx: *arch.Context) callconv(.SysV) void {

if (next_thread == null and current_thread.enqueued) {
apic.eoi();
apic.timerOneShot(current_thread.timeslice, sched_vector);
apic.timerOneShot(timeslice, sched_vector);
return;
}

Expand Down Expand Up @@ -345,13 +461,13 @@ fn schedHandler(ctx: *arch.Context) callconv(.SysV) void {
current_thread.cpu = cpu;

apic.eoi();
apic.timerOneShot(current_thread.timeslice, sched_vector);
apic.timerOneShot(timeslice, sched_vector);
contextSwitch(&current_thread.ctx);
}

pub fn wait() noreturn {
arch.disableInterrupts();
apic.timerOneShot(10000, sched_vector);
apic.timerOneShot(timeslice * 5, sched_vector);
arch.enableInterrupts();
arch.halt();
}
Expand Down
Loading

0 comments on commit 9ffc718

Please sign in to comment.