k-cpu.cc
#include "kernel.hh"
#include "k-apic.hh"
#include "k-vmiter.hh"
cpustate cpus[MAXCPU];
int ncpu;

// cpustate::init()
// Initialize a `cpustate`. Should be called once per active CPU,
// by the relevant CPU.

void cpustate::init() {
    // Note that the `cpu::cpu` constructor has already been called.

    {
        // check that this CPU is one of the expected CPUs
        uintptr_t addr = reinterpret_cast<uintptr_t>(this);
        assert((addr & PAGEOFFMASK) == 0);
        assert(this >= cpus && this < cpus + MAXCPU);
        assert(rdrsp() > addr && rdrsp() <= addr + CPUSTACK_SIZE);

        // ensure layout `k-exception.S` expects
        assert(reinterpret_cast<uintptr_t>(&self_) == addr);
        assert(reinterpret_cast<uintptr_t>(&current_) == addr + 8);
        assert(reinterpret_cast<uintptr_t>(&syscall_scratch_) == addr + 16);
    }

    assert(self_ == this && !current_);
    cpuindex_ = this - cpus;
    runq_lock_.clear();
    idle_task_ = nullptr;
    nschedule_ = 0;
    spinlock_depth_ = 0;

    // now initialize the CPU hardware
    init_cpu_hardware();
}
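
// Usage sketch (not part of this file): each CPU is expected to call
// `init()` on its own `cpustate` early in its boot path, once it is
// running on that CPU's per-CPU stack (the `rdrsp()` assert above checks
// this). A hypothetical per-CPU startup might look like:
//
//     void cpu_start(cpustate* c) {     // hypothetical entry point
//         c->init();                    // layout checks + init_cpu_hardware()
//         c->schedule(nullptr);         // start scheduling; never returns
//     }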

// cpustate::enable_irq(irqno)
// Enable external interrupt `irqno`, delivering it to
// this CPU.

void cpustate::enable_irq(int irqno) {
    assert(irqno >= IRQ_TIMER && irqno <= IRQ_SPURIOUS);
    auto& ioapic = ioapicstate::get();
    ioapic.enable_irq(irqno, INT_IRQ + irqno, lapic_id_);
}

// cpustate::disable_irq(irqno)
// Disable external interrupt `irqno`.

void cpustate::disable_irq(int irqno) {
    assert(irqno >= IRQ_TIMER && irqno <= IRQ_SPURIOUS);
    auto& ioapic = ioapicstate::get();
    ioapic.disable_irq(irqno);
}
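
// Example (sketch, assuming `IRQ_KEYBOARD` is one of the IRQ constants in
// the IRQ_TIMER..IRQ_SPURIOUS range checked above): a driver could route
// its interrupt to the boot CPU at setup time and mask it again later.
//
//     cpus[0].enable_irq(IRQ_KEYBOARD);    // delivered as vector INT_IRQ + IRQ_KEYBOARD
//     // ... later ...
//     cpus[0].disable_irq(IRQ_KEYBOARD);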

// cpustate::enqueue(p)
// Enqueue `p` on this CPU's run queue. Acquires `runq_lock_`. Does nothing
// if `p` is on a run queue or is currently running on this CPU; otherwise
// `p` must be resumable (or not runnable).

void cpustate::enqueue(proc* p) {
    spinlock_guard guard(runq_lock_);
    if (current_ != p && !p->runq_links_.is_linked()) {
        assert(p->resumable() || p->pstate_ != proc::ps_runnable);
        runq_.push_back(p);
        p->home_cpuindex_ = cpuindex_;
    }
}
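
// Usage sketch (assumed wakeup path, not defined in this file): code that
// unblocks a process typically marks it runnable and then enqueues it,
// e.g. on the CPU it last ran on:
//
//     p->pstate_ = proc::ps_runnable;
//     cpus[p->home_cpuindex_].enqueue(p);
//
// The guard in `enqueue()` makes redundant calls harmless: a process that
// is already linked on a run queue, or currently running on this CPU, is
// left alone.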

// cpustate::schedule(yielding_from)
// Run a process, or the current CPU's idle task if no runnable
// process exists. If `yielding_from != nullptr`, then do not
// run `yielding_from` unless no other runnable process exists.

void cpustate::schedule(proc* yielding_from) {
    assert(contains(rdrsp()));     // running on CPU stack
    assert(is_cli());              // interrupts are currently disabled
    assert(spinlock_depth_ == 0);  // no spinlocks are held

    // free an exited process (pstate_ == ps_blank)
    if (yielding_from && yielding_from->pstate_ == proc::ps_blank) {
        kfree(yielding_from);
    }

    // initialize idle task
    if (!idle_task_) {
        init_idle_task();
    }
    // don't immediately re-run idle task
    if (current_ == idle_task_) {
        yielding_from = idle_task_;
    }
    // increment schedule counter
    ++nschedule_;

    // find a runnable process
    while (!current_
           || current_->pstate_ != proc::ps_runnable
           || current_ == yielding_from) {
        runq_lock_.lock_noirq();

        // re-enqueue old current if necessary
        proc* prev = current_;
        if (prev && prev->pstate_ == proc::ps_runnable) {
            assert(prev->resumable());
            assert(!prev->runq_links_.is_linked());
            runq_.push_back(prev);
        }

        // run idle task as last resort
        current_ = runq_.empty() ? idle_task_ : runq_.pop_front();
        runq_lock_.unlock_noirq();

        // no need to skip `current_` if no other runnable procs
        yielding_from = nullptr;
    }

    // run `current_`
    set_pagetable(current_->pagetable_);
    if (current_->tg_->should_exit_) {
        log_printf("HELLO! exiting early pid[%d] tgid[%d] %d\n",
                   current_->id_, current_->tgid_,
                   current_->tg_->process_exit_status_);
        current_->texit(current_->tg_->process_exit_status_);
    }
    current_->resume_count_++;
    current_->resume();   // does not return
}
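
// Usage sketch (hedged): `schedule()` never returns, so callers first save
// the current process's state and move onto this CPU's stack, then call it
// with interrupts disabled and no spinlocks held (the asserts at the top).
// The `yield_from()` helper below is hypothetical; only the calling
// convention is implied by this file:
//
//     void yield_from(proc* p) {        // runs on p's CPU, on the CPU stack
//         cli();                        // interrupts must be off
//         // ... p's registers already saved by the caller ...
//         cpus[p->home_cpuindex_].schedule(p);   // prefer some other runnable proc
//     }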

// cpustate::idle_task
// Every CPU has an *idle task*, which is a kernel task (i.e., a
// `proc` that runs in kernel mode) that just stops the processor
// until an interrupt is received. The idle task runs when a CPU
// has nothing better to do.

void idle() {
    sti();
    while (true) {
        asm volatile("hlt");
    }
}
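
// Note on `idle()` above: `sti()` must come before the `hlt` loop. With
// interrupts masked, `hlt` would stop the processor with no ordinary way
// to wake it; with interrupts enabled, each incoming interrupt wakes the
// CPU so the kernel can handle it and, if work has appeared, schedule a
// real process instead of halting again.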

void cpustate::init_idle_task() {
    assert(!idle_task_);
    threadgroup* idle_task_tg = knew<threadgroup>();
    idle_task_tg->init(-1, -1, early_pagetable);
    idle_task_ = knew<proc>();
    idle_task_->init_kernel(-1, idle_task_tg, idle);
    idle_task_tg->add_proc_to_thread_list(idle_task_);
}
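
// Note: `init_idle_task()` is only called lazily, from `schedule()`, the
// first time this CPU needs to pick something to run. The idle task is a
// kernel task built on `early_pagetable` with -1 identifiers, which keeps
// it distinct from ordinary processes and thread groups.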