From a781ca3c387630784a832f448ea213a975ad440a Mon Sep 17 00:00:00 2001
From: Ratakor <ratakor@disroot.org>
Date: Thu, 19 Oct 2023 13:51:23 +0200
Subject: [PATCH] Add comptime asserts for packed struct sizes + pmm.reclaimMemory

---
 kernel/SpinLock.zig           |   8 ++-
 kernel/arch/x86_64/cpu.zig    |   8 ++-
 kernel/arch/x86_64/gdt.zig    |  13 +++-
 kernel/arch/x86_64/x86_64.zig |   7 +++
 kernel/debug.zig              |   2 +
 kernel/main.zig               | 111 +++++++++++++++++-----------------
 kernel/pmm.zig                |  25 ++++++--
 kernel/sched.zig              |  43 ++++++++-----
 kernel/smp.zig                |   3 -
 kernel/time.zig               |   2 +-
 kernel/vmm.zig                |  44 ++++++++------
 11 files changed, 163 insertions(+), 103 deletions(-)
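
A quick standalone illustration of the assert pattern this patch adds to the
packed structs (PAT, GDTEntry, TSSDescriptor, Rflags, PTE). The Example type
below is hypothetical and only shows the shape of the check, it is not part of
the kernel:

    const std = @import("std");

    const Example = packed struct {
        low: u32 = 0,
        high: u32 = 0,

        // Evaluated at compile time: the build fails if the field layout
        // ever stops adding up to exactly 64 bits.
        comptime {
            std.debug.assert(@sizeOf(Example) == @sizeOf(u64));
            std.debug.assert(@bitSizeOf(Example) == @bitSizeOf(u64));
        }
    };

Checking both matters for packed structs: @bitSizeOf catches a wrong field
width, while @sizeOf catches the ABI size of the backing integer, which can be
rounded up past the raw bit count.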

diff --git a/kernel/SpinLock.zig b/kernel/SpinLock.zig
index b81a936..b464456 100644
--- a/kernel/SpinLock.zig
+++ b/kernel/SpinLock.zig
@@ -35,7 +35,9 @@ inline fn lockFast(self: *SpinLock, comptime cas_fn_name: []const u8) bool {
 fn lockSlow(self: *SpinLock) void {
     @setCold(true);
 
-    for (0..100_000_000) |_| {
+    // TODO
+    // for (0..100_000_000) |_| {
+    while (true) {
         if (self.lockFast("tryCompareAndSwap")) {
             return;
         }
@@ -47,5 +49,7 @@ fn lockSlow(self: *SpinLock) void {
 
 pub fn unlock(self: *SpinLock) void {
     const state = self.state.swap(unlocked, .Release);
-    std.debug.assert(state == locked);
+    // TODO
+    _ = state;
+    // std.debug.assert(state == locked);
 }
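
The two SpinLock TODOs above drop the bounded spin and the unlocked-state
assertion. If they are brought back later, one possible shape is the sketch
below; it reuses the old 100_000_000 bound, assumes the file's existing std
import, and is not part of this patch:

    fn lockSlow(self: *SpinLock) void {
        @setCold(true);

        // Sketch: bounded spin that reports a suspected deadlock instead of
        // hanging forever; mirrors the commented-out loop above.
        for (0..100_000_000) |_| {
            if (self.lockFast("tryCompareAndSwap")) return;
            std.atomic.spinLoopHint();
        }
        @panic("SpinLock: deadlock");
    }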
diff --git a/kernel/arch/x86_64/cpu.zig b/kernel/arch/x86_64/cpu.zig
index b0a18d1..61e1daa 100644
--- a/kernel/arch/x86_64/cpu.zig
+++ b/kernel/arch/x86_64/cpu.zig
@@ -1,9 +1,10 @@
+const std = @import("std");
 const limine = @import("limine");
 const root = @import("root");
 const x86 = @import("x86_64.zig");
 const gdt = @import("idt.zig");
 const idt = @import("idt.zig");
-const log = @import("std").log.scoped(.cpu);
+const log = std.log.scoped(.cpu);
 
 const PAT = packed struct {
     // zig fmt: off
@@ -25,6 +26,11 @@ const PAT = packed struct {
         write_back = 6,
         uncached = 7,
     };
+
+    comptime {
+        std.debug.assert(@sizeOf(PAT) == @sizeOf(u64));
+        std.debug.assert(@bitSizeOf(PAT) == @bitSizeOf(u64));
+    }
 };
 
 /// https://en.wikipedia.org/wiki/CPUID#EAX=1:_Processor_Info_and_Feature_Bits
diff --git a/kernel/arch/x86_64/gdt.zig b/kernel/arch/x86_64/gdt.zig
index ca45d69..8436bf0 100644
--- a/kernel/arch/x86_64/gdt.zig
+++ b/kernel/arch/x86_64/gdt.zig
@@ -1,4 +1,5 @@
-const log = @import("std").log.scoped(.gdt);
+const std = @import("std");
+const log = std.log.scoped(.gdt);
 const SpinLock = @import("root").SpinLock;
 
 const GDTEntry = packed struct {
@@ -9,6 +10,11 @@ const GDTEntry = packed struct {
     limit_high: u4 = 0,
     flags: u4 = 0,
     base_high: u8 = 0,
+
+    comptime {
+        std.debug.assert(@sizeOf(GDTEntry) == @sizeOf(u64));
+        std.debug.assert(@bitSizeOf(GDTEntry) == @bitSizeOf(u64));
+    }
 };
 
 const TSSDescriptor = packed struct {
@@ -21,6 +27,11 @@ const TSSDescriptor = packed struct {
     base_high: u8 = undefined,
     base_upper: u32 = undefined,
     reserved: u32 = 0,
+
+    comptime {
+        std.debug.assert(@sizeOf(TSSDescriptor) == @sizeOf(u64) * 2);
+        std.debug.assert(@bitSizeOf(TSSDescriptor) == @bitSizeOf(u64) * 2);
+    }
 };
 
 /// Task State Segment
diff --git a/kernel/arch/x86_64/x86_64.zig b/kernel/arch/x86_64/x86_64.zig
index 0538e26..be00f25 100644
--- a/kernel/arch/x86_64/x86_64.zig
+++ b/kernel/arch/x86_64/x86_64.zig
@@ -1,3 +1,5 @@
+const assert = @import("std").debug.assert;
+
 pub const Rflags = packed struct {
     CF: u1 = 0,
     reserved: u1 = 1,
@@ -21,6 +23,11 @@ pub const Rflags = packed struct {
     VIP: u1 = 0,
     ID: u1 = 0,
     reserved4: u42 = 0,
+
+    comptime {
+        assert(@sizeOf(Rflags) == @sizeOf(u64));
+        assert(@bitSizeOf(Rflags) == @bitSizeOf(u64));
+    }
 };
 
 pub const CPUID = struct {
diff --git a/kernel/debug.zig b/kernel/debug.zig
index 0f96fcf..61ee556 100644
--- a/kernel/debug.zig
+++ b/kernel/debug.zig
@@ -52,6 +52,8 @@ pub fn panic(msg: []const u8, _: ?*std.builtin.StackTrace, _: ?usize) noreturn {
         arch.halt();
     }
 
+    // TODO: send a signal to the other cpus to stop them
+
     const fmt = "\x1b[m\x1b[31m\nKernel panic:\x1b[m {s}\n";
     if (root.tty0) |tty| {
         const writer = tty.writer();
diff --git a/kernel/main.zig b/kernel/main.zig
index 04d24eb..4a7578d 100644
--- a/kernel/main.zig
+++ b/kernel/main.zig
@@ -77,25 +77,22 @@ export fn _start() noreturn {
     smp.init();
     time.init();
 
-    // TODO: crash due to page fault
-    // const kernel_thread = sched.Thread.initKernel(@ptrCast(&main), null, 1) catch unreachable;
-    // sched.enqueue(kernel_thread) catch unreachable;
-
-    arch.enableInterrupts();
+    const kernel_thread = sched.Thread.initKernel(@ptrCast(&main), null, 1) catch unreachable;
+    sched.enqueue(kernel_thread) catch unreachable;
     sched.wait();
 }
 
 fn main() !void {
-    const fb = framebuffer_request.response.?.framebuffers()[0];
-    tty0 = TTY.init(fb.address, fb.width, fb.height, callback) catch unreachable;
+    // const fb = framebuffer_request.response.?.framebuffers()[0];
+    // tty0 = TTY.init(fb.address, fb.width, fb.height, callback) catch unreachable;
 
-    const boot_info = boot_info_request.response.?;
-    tty0.?.writer().print("Welcome to Ubik, brought to you by {s} {s} :)\n", .{
-        boot_info.name,
-        boot_info.version,
-    }) catch unreachable;
+    // const boot_info = boot_info_request.response.?;
+    // tty0.?.writer().print("Welcome to Ubik, brought to you by {s} {s} :)\n", .{
+    //     boot_info.name,
+    //     boot_info.version,
+    // }) catch unreachable;
 
-    ps2.init();
+    // ps2.init();
     // TODO: pci
     // TODO: vfs
     // TODO: basic syscalls
@@ -108,48 +105,52 @@ fn main() !void {
     // TODO: filesystem
     // TODO: IPC: pipe, socket (TCP, UDP, Unix)
 
-    var regs = arch.cpuid(0, 0);
-    std.log.debug("vendor string: {s}{s}{s}", .{
-        @as([*]const u8, @ptrCast(&regs.ebx))[0..4],
-        @as([*]const u8, @ptrCast(&regs.edx))[0..4],
-        @as([*]const u8, @ptrCast(&regs.ecx))[0..4],
-    });
-
-    regs = arch.cpuid(0x80000000, 0);
-    if (regs.eax >= 0x80000004) {
-        regs = arch.cpuid(0x80000002, 0);
-        serial.writer.print("cpu name: {s}{s}{s}{s}", .{
-            @as([*]const u8, @ptrCast(&regs.eax))[0..4],
-            @as([*]const u8, @ptrCast(&regs.ebx))[0..4],
-            @as([*]const u8, @ptrCast(&regs.ecx))[0..4],
-            @as([*]const u8, @ptrCast(&regs.edx))[0..4],
-        }) catch unreachable;
-        regs = arch.cpuid(0x80000003, 0);
-        serial.writer.print("{s}{s}{s}{s}", .{
-            @as([*]const u8, @ptrCast(&regs.eax))[0..4],
-            @as([*]const u8, @ptrCast(&regs.ebx))[0..4],
-            @as([*]const u8, @ptrCast(&regs.ecx))[0..4],
-            @as([*]const u8, @ptrCast(&regs.edx))[0..4],
-        }) catch unreachable;
-        regs = arch.cpuid(0x80000004, 0);
-        serial.writer.print("{s}{s}{s}{s}\n", .{
-            @as([*]const u8, @ptrCast(&regs.eax))[0..4],
-            @as([*]const u8, @ptrCast(&regs.ebx))[0..4],
-            @as([*]const u8, @ptrCast(&regs.ecx))[0..4],
-            @as([*]const u8, @ptrCast(&regs.edx))[0..4],
-        }) catch unreachable;
-    }
-
-    const rand = @import("rand.zig");
-    var pcg = rand.Pcg.init(rand.getSeedSlow());
-    inline for (0..8) |_| {
-        const thread = sched.Thread.initKernel(
-            @ptrCast(&hihihi),
-            null,
-            pcg.random().int(u4),
-        ) catch unreachable;
-        sched.enqueue(thread) catch unreachable;
-    }
+    // pmm.reclaimMemory();
+
+    // var regs = arch.cpuid(0, 0);
+    // std.log.debug("vendor string: {s}{s}{s}", .{
+    //     @as([*]const u8, @ptrCast(&regs.ebx))[0..4],
+    //     @as([*]const u8, @ptrCast(&regs.edx))[0..4],
+    //     @as([*]const u8, @ptrCast(&regs.ecx))[0..4],
+    // });
+
+    // regs = arch.cpuid(0x80000000, 0);
+    // if (regs.eax >= 0x80000004) {
+    //     regs = arch.cpuid(0x80000002, 0);
+    //     serial.writer.print("cpu name: {s}{s}{s}{s}", .{
+    //         @as([*]const u8, @ptrCast(&regs.eax))[0..4],
+    //         @as([*]const u8, @ptrCast(&regs.ebx))[0..4],
+    //         @as([*]const u8, @ptrCast(&regs.ecx))[0..4],
+    //         @as([*]const u8, @ptrCast(&regs.edx))[0..4],
+    //     }) catch unreachable;
+    //     regs = arch.cpuid(0x80000003, 0);
+    //     serial.writer.print("{s}{s}{s}{s}", .{
+    //         @as([*]const u8, @ptrCast(&regs.eax))[0..4],
+    //         @as([*]const u8, @ptrCast(&regs.ebx))[0..4],
+    //         @as([*]const u8, @ptrCast(&regs.ecx))[0..4],
+    //         @as([*]const u8, @ptrCast(&regs.edx))[0..4],
+    //     }) catch unreachable;
+    //     regs = arch.cpuid(0x80000004, 0);
+    //     serial.writer.print("{s}{s}{s}{s}\n", .{
+    //         @as([*]const u8, @ptrCast(&regs.eax))[0..4],
+    //         @as([*]const u8, @ptrCast(&regs.ebx))[0..4],
+    //         @as([*]const u8, @ptrCast(&regs.ecx))[0..4],
+    //         @as([*]const u8, @ptrCast(&regs.edx))[0..4],
+    //     }) catch unreachable;
+    // }
+
+    // const rand = @import("rand.zig");
+    // var pcg = rand.Pcg.init(rand.getSeedSlow());
+    // inline for (0..8) |_| {
+    //     const thread = sched.Thread.initKernel(
+    //         @ptrCast(&hihihi),
+    //         null,
+    //         pcg.random().int(u4),
+    //     ) catch unreachable;
+    //     sched.enqueue(thread) catch unreachable;
+    // }
+
+    arch.halt();
 }
 
 fn hihihi() void {
diff --git a/kernel/pmm.zig b/kernel/pmm.zig
index d7eaf9c..571c86f 100644
--- a/kernel/pmm.zig
+++ b/kernel/pmm.zig
@@ -3,17 +3,15 @@ const root = @import("root");
 const vmm = @import("vmm.zig");
 const SpinLock = @import("SpinLock.zig");
 const log = std.log.scoped(.pmm);
-
 const page_size = std.mem.page_size;
-const free_page = false;
 
+const free_page = false;
 // TODO: use u64 and bitwise operation to speed up the process?
-// TODO: decide what to do with "useless" stuff
 var bitmap: []bool = undefined;
 var last_idx: u64 = 0;
-var usable_pages: u64 = 0; // useless?
-var used_pages: u64 = 0; // useless?
-var reserved_pages: u64 = 0; // useless?
+var usable_pages: u64 = 0;
+var used_pages: u64 = 0;
+var reserved_pages: u64 = 0;
 var lock: SpinLock = .{}; // TODO: remove lock on pmm and only use
 //                                 root.allocator for risky allocations,
 //                                 or remove lock on root.allocator?
@@ -73,6 +71,21 @@ pub fn init() void {
     log.info("reserved memory: {} MiB", .{(reserved_pages * page_size) / 1024 / 1024});
 }
 
+pub fn reclaimMemory() void {
+    const memory_map = root.memory_map_request.response.?;
+
+    for (memory_map.entries()) |entry| {
+        if (entry.kind == .bootloader_reclaimable) {
+            const pages = entry.length / page_size;
+            usable_pages += pages;
+            reserved_pages -= pages;
+            free(entry.base, pages);
+
+            log.info("reclaimed {} pages at 0x{x}", .{ pages, entry.base });
+        }
+    }
+}
+
 fn innerAlloc(pages: usize, limit: u64) ?u64 {
     var p: usize = 0;
 
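
One caution for reclaimMemory() above (an observation, not something this
patch changes): under the Limine protocol the memory map response itself lives
in bootloader-reclaimable memory, so once those ranges go back to the
allocator the entries being iterated could in principle be reused. A defensive
variant, sketched here with an arbitrary fixed bound, would snapshot the
ranges before freeing them:

    // Sketch only: copy the reclaimable ranges out first, so the second loop
    // never reads entries whose backing memory may already be handed out.
    var ranges: [64]struct { base: u64, pages: u64 } = undefined;
    var count: usize = 0;
    for (memory_map.entries()) |entry| {
        if (entry.kind == .bootloader_reclaimable and count < ranges.len) {
            ranges[count] = .{ .base = entry.base, .pages = entry.length / page_size };
            count += 1;
        }
    }
    for (ranges[0..count]) |r| {
        usable_pages += r.pages;
        reserved_pages -= r.pages;
        free(r.base, r.pages);
    }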
diff --git a/kernel/sched.zig b/kernel/sched.zig
index edafe1f..ba19e41 100644
--- a/kernel/sched.zig
+++ b/kernel/sched.zig
@@ -68,7 +68,6 @@ pub const Process = struct {
     }
 };
 
-// TODO: extern ?
 pub const Thread = struct {
     self: *Thread, // TODO
     errno: usize, // TODO
@@ -140,7 +139,7 @@ pub const Thread = struct {
         thread.gs_base = @intFromPtr(thread);
 
         thread.process = kernel_process;
-        thread.timeslice = 5000;
+        thread.timeslice = 1000;
         // TODO: calculate this once and store it in arch.cpu.fpu_storage_pages
         const pages = std.math.divCeil(u64, arch.cpu.fpu_storage_size, page_size) catch unreachable;
         const fpu_storage_phys = pmm.alloc(pages, true) orelse {
@@ -152,6 +151,8 @@ pub const Thread = struct {
 
         // kernel_process.threads.append(root.allocator, thread);
 
+        std.debug.assert(@intFromPtr(thread) == @intFromPtr(&thread.self));
+
         return thread;
     }
 
@@ -162,9 +163,9 @@ pub const Thread = struct {
 
     // TODO
     pub fn deinit(self: *Thread) void {
-        // for (self.stacks.items) |stack| {
-        //     pmm.free(stack - vmm.hhdm_offset - stack_size, stack_pages);
-        // }
+        for (self.stacks.items) |stack| {
+            pmm.free(stack, stack_pages);
+        }
         // TODO: calculate this once and store it in arch.cpu.fpu_storage_pages
         const pages = std.math.divCeil(u64, arch.cpu.fpu_storage_size, page_size) catch unreachable;
         pmm.free(self.fpu_storage - vmm.hhdm_offset, pages);
@@ -187,9 +188,10 @@ pub fn init() void {
     sched_vector = idt.allocVector();
     log.info("scheduler interrupt vector: 0x{x}", .{sched_vector});
     idt.registerHandler(sched_vector, schedHandler);
-    // idt.setIST(sched_vector, 1);
+    // idt.setIST(sched_vector, 1); // TODO
 }
 
+// TODO: store the cpu pointer in gs instead of the thread pointer
 pub inline fn currentThread() *Thread {
     return asm volatile (
         \\mov %%gs:0x0, %[thr]
@@ -197,6 +199,7 @@ pub inline fn currentThread() *Thread {
     );
 }
 
+// O(n)
 fn nextThread() ?*Thread {
     const ticket = random.uintLessThan(usize, total_tickets + 1);
     var sum: usize = 0;
@@ -213,6 +216,7 @@ fn nextThread() ?*Thread {
     return null;
 }
 
+// O(1)
 pub fn enqueue(thread: *Thread) !void {
     if (thread.enqueued) return;
 
@@ -234,7 +238,7 @@ pub fn enqueue(thread: *Thread) !void {
     }
 }
 
-// TODO: slow
+// O(n)
 pub fn dequeue(thread: *Thread) void {
     if (!thread.enqueued) return;
 
@@ -243,21 +247,20 @@ pub fn dequeue(thread: *Thread) void {
 
     for (running_threads.items, 0..) |thr, i| {
         if (thr == thread) {
-            _ = running_threads.orderedRemove(i);
+            _ = running_threads.swapRemove(i);
             total_tickets -= thread.tickets;
             thread.enqueued = false;
-            log.info("dequeued thread: {*} of index {}", .{ thread, i });
+            log.info("dequeued thread: {*}", .{thread});
             return;
         }
     }
     log.warn("trying to dequeue unknown thread: {*}", .{thread});
 }
 
-// TODO: deinit current thread?
-/// dequeue current thread and yield
 pub fn die() noreturn {
     arch.disableInterrupts();
     dequeue(currentThread());
+    // TODO: deinit current thread
     yield(false);
     unreachable;
 }
@@ -266,7 +269,15 @@ fn schedHandler(ctx: *arch.Context) void {
     apic.timerStop();
 
     var current_thread = currentThread();
-    std.debug.assert(@intFromPtr(current_thread) != 0);
+    std.debug.assert(@intFromPtr(current_thread) != 0); // TODO
+
+    // TODO
+    if (current_thread.scheduling_off) {
+        apic.eoi();
+        apic.timerOneShot(current_thread.timeslice, sched_vector);
+        return;
+    }
+
     const cpu = smp.thisCpu(); // TODO: current_thread.cpu
     cpu.active = true;
     const next_thread = nextThread();
@@ -289,6 +300,8 @@ fn schedHandler(ctx: *arch.Context) void {
             arch.cpu.fpuSave(current_thread.fpu_storage);
         }
 
+        // TODO
+        // current_thread.cpu = null;
         current_thread.lock.unlock();
     }
 
@@ -318,11 +331,11 @@ fn schedHandler(ctx: *arch.Context) void {
         // if (sysenter) {
         //     arch.wrmsr(0x175, @intFromPtr(current_thread.kernel_stack));
         // } else {
-        //     cpu.tss.ist3 = @intFromPtr(current_thread.kernel_stack);
+        //     cpu.tss.ist[3] = @intFromPtr(current_thread.kernel_stack);
         // }
 
         // TODO: set page fault stack
-        // cpu.tss.ist2 = @intFromPtr(current_thread.pf_stack);
+        // cpu.tss.ist[2] = current_thread.pf_stack;
 
         if (arch.readRegister("cr3") != current_thread.cr3) {
             arch.writeRegister("cr3", current_thread.cr3);
@@ -340,7 +353,7 @@ fn schedHandler(ctx: *arch.Context) void {
 
 pub fn wait() noreturn {
     arch.disableInterrupts();
-    apic.timerOneShot(10_000, sched_vector);
+    apic.timerOneShot(1000, sched_vector);
     arch.enableInterrupts();
     arch.halt();
 }
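
On the O(n)/O(1) notes in sched.zig: enqueue() appends to running_threads and
bumps total_tickets (amortized O(1)), while nextThread() draws a random ticket
and walks the list until the running sum passes it, so selection is linear in
the number of runnable threads. A stripped-down sketch of that lottery pick
(standalone, hypothetical names, not the kernel's code):

    const std = @import("std");

    // Returns the index whose cumulative ticket count first reaches a
    // randomly drawn ticket, so each entry wins in proportion to its share.
    fn pickIndex(random: std.rand.Random, tickets: []const usize, total: usize) ?usize {
        const winner = random.uintLessThan(usize, total + 1);
        var sum: usize = 0;
        for (tickets, 0..) |t, i| {
            sum += t;
            if (sum >= winner) return i;
        }
        return null;
    }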
diff --git a/kernel/smp.zig b/kernel/smp.zig
index b2bd3a9..b483d6e 100644
--- a/kernel/smp.zig
+++ b/kernel/smp.zig
@@ -88,8 +88,6 @@ pub fn init() void {
             arch.out(u8, 0x21, 0xff);
             apic.init(); // smp safe
 
-            arch.enableInterrupts();
-
             log.info("bootstrap processor is online with id: {}", .{cpu_local.id});
             _ = @atomicRmw(usize, &cpus_started, .Add, 1, .AcqRel);
         } else {
@@ -142,6 +140,5 @@ fn trampoline(smp_info: *limine.SmpInfo) callconv(.C) noreturn {
     log.info("processor {} is online", .{cpu_local.id});
     _ = @atomicRmw(usize, &cpus_started, .Add, 1, .AcqRel);
 
-    arch.enableInterrupts();
     sched.wait();
 }
diff --git a/kernel/time.zig b/kernel/time.zig
index ef19b91..292afcd 100644
--- a/kernel/time.zig
+++ b/kernel/time.zig
@@ -131,7 +131,7 @@ pub fn getCurrentCount() u16 {
     arch.out(u8, 0x43, 0);
     const lo = arch.in(u8, 0x40);
     const hi = arch.in(u8, 0x40);
-    return (@as(u16, @intCast(hi)) << 8) | lo;
+    return (@as(u16, hi) << 8) | lo;
 }
 
 fn timerHandler(ctx: *arch.Context) void {
diff --git a/kernel/vmm.zig b/kernel/vmm.zig
index be67900..e831e63 100644
--- a/kernel/vmm.zig
+++ b/kernel/vmm.zig
@@ -62,6 +62,11 @@ pub const PTE = packed struct {
         self.* = @bitCast(new_page_table | present | writable | user);
         return @ptrFromInt(new_page_table + hhdm_offset);
     }
+
+    comptime {
+        std.debug.assert(@sizeOf(PTE) == @sizeOf(u64));
+        std.debug.assert(@bitSizeOf(PTE) == @bitSizeOf(u64));
+    }
 };
 
 // TODO
@@ -75,8 +80,8 @@ const Mapping = struct {
 
 pub const AddressSpace = struct {
     pml4: *[512]PTE,
-    lock: SpinLock = .{},
-    mappings: std.ArrayListUnmanaged(Mapping) = .{}, // TODO
+    lock: SpinLock,
+    mmap_ranges: std.ArrayListUnmanaged(*Mapping), // TODO
 
     const Self = @This();
 
@@ -88,6 +93,7 @@ pub const AddressSpace = struct {
         };
         addr_space.pml4 = @ptrFromInt(pml4_phys + hhdm_offset);
         addr_space.lock = .{};
+        addr_space.mmap_ranges = .{};
 
         // TODO
         // for (256..512) |i| {
@@ -170,6 +176,20 @@ pub const AddressSpace = struct {
         }
     }
 
+    inline fn mapSection(self: *Self, comptime section: []const u8, flags: u64) MapError!void {
+        const start: u64 = @intFromPtr(@extern([*]u8, .{ .name = section ++ "_start" }));
+        const end: u64 = @intFromPtr(@extern([*]u8, .{ .name = section ++ "_end" }));
+        const start_addr = alignBackward(u64, start, page_size);
+        const end_addr = alignForward(u64, end, page_size);
+        const kaddr = root.kernel_address_request.response.?;
+
+        var addr = start_addr;
+        while (addr < end_addr) : (addr += page_size) {
+            const paddr = addr - kaddr.virtual_base + kaddr.physical_base;
+            try self.mapPage(addr, paddr, flags);
+        }
+    }
+
     pub inline fn cr3(self: *const Self) u64 {
         return @intFromPtr(self.pml4) - hhdm_offset;
     }
@@ -197,9 +217,9 @@ pub fn init() MapError!void {
         _ = kaddr_space.pml4[i].getNextLevel(true);
     }
 
-    try mapSection("text", kaddr_space, PTE.present);
-    try mapSection("rodata", kaddr_space, PTE.present | PTE.noexec);
-    try mapSection("data", kaddr_space, PTE.present | PTE.writable | PTE.noexec);
+    try kaddr_space.mapSection("text", PTE.present);
+    try kaddr_space.mapSection("rodata", PTE.present | PTE.noexec);
+    try kaddr_space.mapSection("data", PTE.present | PTE.writable | PTE.noexec);
 
     // map the first 4 GiB
     var addr: u64 = 0x1000;
@@ -238,20 +258,6 @@ pub inline fn switchPageTable(page_table: u64) void {
     arch.writeRegister("cr3", page_table);
 }
 
-inline fn mapSection(comptime section: []const u8, addr_space: *AddressSpace, flags: u64) MapError!void {
-    const start: u64 = @intFromPtr(@extern([*]u8, .{ .name = section ++ "_start" }));
-    const end: u64 = @intFromPtr(@extern([*]u8, .{ .name = section ++ "_end" }));
-    const start_addr = alignBackward(u64, start, page_size);
-    const end_addr = alignForward(u64, end, page_size);
-    const kaddr = root.kernel_address_request.response.?;
-
-    var addr = start_addr;
-    while (addr < end_addr) : (addr += page_size) {
-        const paddr = addr - kaddr.virtual_base + kaddr.physical_base;
-        try addr_space.mapPage(addr, paddr, flags);
-    }
-}
-
 fn pageFaultHandler(ctx: *arch.Context) void {
     _ = ctx;
     // TODO: makes cpus crash at some point