-
Notifications
You must be signed in to change notification settings - Fork 0
/
spinlock.c
150 lines (131 loc) · 3.02 KB
/
spinlock.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
// Mutual exclusion spin locks.
#include "types.h"
#include "defs.h"
#include "param.h"
#include "sh4.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"
// Initialize lk as an unlocked spinlock. The name is kept only
// for debugging output; cpu records the eventual holder.
void
initlock(struct spinlock *lk, char *name)
{
  lk->locked = 0;
  lk->cpu = 0;
  lk->name = name;
}
// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
//
// Interrupts are disabled (via pushcli) before touching lk so a
// handler on this CPU cannot deadlock trying to take the same lock.
void
acquire(struct spinlock *lk)
{
pushcli(); // disable interrupts to avoid deadlock.
// Re-acquiring a lock this CPU already holds is a bug.
if(holding(lk))
panic("acquire");
// The xchg is atomic.
// It also serializes, so that reads after acquire are not
// reordered before it.
#if 0
while(xchg(&lk->locked, 1) != 0)
;
#else
// TODO: smp
// Non-atomic test-and-set: correct only on a single CPU with
// interrupts already disabled by pushcli above, since nothing
// can run between the test and the store.
// NOTE(review): lk->locked is read without volatile or an atomic;
// if this loop ever actually spins, the compiler may hoist the
// load and never observe a change — revisit when SMP is added.
while(lk->locked != 0)
;
lk->locked = 1;
#endif
// Record info about lock acquisition for debugging.
lk->cpu = cpu;
getcallerpcs(&lk, lk->pcs);
}
// Release the lock.
// Must be called by the CPU that acquired it; clears the debug
// info before dropping the lock so a racing acquirer never sees
// stale holder data, then re-enables interrupts via popcli.
void
release(struct spinlock *lk)
{
// Releasing a lock we do not hold is a bug.
if(!holding(lk))
panic("release");
// Clear debug info while the lock is still held.
lk->pcs[0] = 0;
lk->cpu = 0;
// The xchg serializes, so that reads before release are
// not reordered after it. The 1996 PentiumPro manual (Volume 3,
// 7.2) says reads can be carried out speculatively and in
// any order, which implies we need to serialize here.
// But the 2007 Intel 64 Architecture Memory Ordering White
// Paper says that Intel 64 and IA-32 will not move a load
// after a store. So lock->locked = 0 would work here.
// The xchg being asm volatile ensures gcc emits it after
// the above assignments (and after the critical section).
#if 0
xchg(&lk->locked, 0);
#else
// TODO: smp
// Plain store is sufficient on a uniprocessor with interrupts
// disabled; needs a serializing release when SMP is added.
lk->locked = 0;
#endif
popcli();
}
// Record the current call stack in pcs[] for lock debugging.
//
// XXX: The SH4 stack frame layout differs from x86 (there is no
// saved-%ebp chain to follow), so the real walk is left for future
// work. Until then all 10 slots are zeroed so debug output stays
// well-defined. The original x86 walker, kept for reference:
//
//   ebp = (uint*)v - 2;
//   for(i = 0; i < 10; i++){
//     if(ebp == 0 || ebp < (uint*)0x100000 || ebp == (uint*)0xffffffff)
//       break;
//     pcs[i] = ebp[1];      // saved %eip
//     ebp = (uint*)ebp[0];  // saved %ebp
//   }
//
// (The previous version contained this loop with an unconditional
// break as its first statement, leaving the body unreachable and
// the ebp computation unused.)
void
getcallerpcs(void *v, uint pcs[])
{
  int i;

  (void)v;  // unused until SH4 stack walking is implemented
  for(i = 0; i < 10; i++)
    pcs[i] = 0;
}
// Return nonzero iff the lock is taken and this CPU is the holder.
int
holding(struct spinlock *lock)
{
  if(!lock->locked)
    return 0;
  return lock->cpu == cpu;
}
// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli. Also, if interrupts
// are off, then pushcli, popcli leaves them off.
void
pushcli(void)
{
#if 0
// Original x86 version: intena records whether interrupts were
// ENABLED (FL_IF set) at the outermost pushcli.
int eflags;
eflags = readeflags();
cli();
if(cpu->ncli++ == 0)
cpu->intena = eflags & FL_IF;
#else
int sr;
// SH4 version. NOTE(review): the sense of intena is inverted
// relative to x86 — SR.BL set presumably means interrupts are
// BLOCKED, so intena here is nonzero when interrupts were OFF
// at the outermost pushcli. popcli matches this by calling
// sti() only when !cpu->intena. Confirm against sh4.h.
sr = read_sr(); // snapshot SR before cli() modifies it
cli();
if(cpu->ncli++ == 0) // record prior state only at the outermost push
cpu->intena = sr & SR_BL_MASK;
#endif
}
// Undo one pushcli. Interrupts are re-enabled only when the
// outermost pushcli is undone AND they were enabled before it.
void
popcli(void)
{
#if 0
// Original x86 version: FL_IF set means interrupts are currently
// enabled, which must never be the case inside pushcli/popcli.
if(readeflags()&FL_IF)
panic("popcli - interruptible");
if(--cpu->ncli < 0)
panic("popcli");
if(cpu->ncli == 0 && cpu->intena)
sti();
#else
// SH4 version: SR.BL clear presumably means interrupts are
// currently deliverable — illegal between pushcli and popcli.
if(!(read_sr() & SR_BL_MASK))
panic("popcli - interruptible");
// More popcli than pushcli is a bug.
if(--cpu->ncli < 0)
panic("popcli");
// intena holds the SR_BL bits saved by pushcli: zero means
// interrupts were ENABLED at the outermost pushcli (note the
// inverted sense relative to the x86 version above).
if(cpu->ncli == 0 && !cpu->intena)
sti();
#endif
}