/*
* metadata.c
*
* Copyright (C) 2014
* Maxime Lorrillere <[email protected]>
* LIP6 - Laboratoire d'Informatique de Paris 6
*/
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

#include "metadata.h"

/*
 * As we can't do atomic wait/wakeup with radix tree tags, we use a hash table
 * to store busy-page state. In the racy case, the waiter atomically adds a
 * busy_page struct to the hash table and waits on a bit to be cleared.
 *
 * On the other side, when an ack is received, the bit is cleared and all
 * threads waiting for a busy page are woken.
 */
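
/*
 * A minimal sketch of the intended calling protocol, inferred from the
 * lockdep assertions below rather than taken from real call sites (the
 * surrounding steps are assumptions):
 *
 *	spin_lock(&inode->lock);
 *	__remotecache_metadata_set_busy(inode, index);
 *	spin_unlock(&inode->lock);
 *	... send the request to the remote cache ...
 *
 *	remotecache_metadata_wait_busy(inode, index);		(readers, lockless)
 *
 *	spin_lock(&inode->lock);
 *	__remotecache_metadata_clear_busy(inode, index);	(on ack)
 *	spin_unlock(&inode->lock);
 */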
DECLARE_WAIT_QUEUE_HEAD(wait_busy_page);
DEFINE_HASHTABLE(busy_pages_hash, 6);
DEFINE_SPINLOCK(busy_pages_lock);
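
/*
 * Busy pages are keyed by ino ^ index into a 64-bucket (2^6) table;
 * collisions are resolved by comparing both fields on lookup.
 */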
/*
 * A busy_page lives on the waiter's stack: it is hashed in by
 * remotecache_metadata_wait_busy() and removed again (hash_del_rcu()
 * followed by synchronize_rcu()) before that function returns, so it
 * never outlives its stack frame.
 */
struct busy_page {
	struct hlist_node hash;	/* linkage in busy_pages_hash */
	ino_t ino;
	pgoff_t index;
	atomic_t state;		/* 1 while busy, 0 once acked */
};
static int sleep_on_remotecache_page_metadata(void *word)
{
	schedule();
	return 0;
}

void wake_up_remotecache_page_metadata(void *word, int bit)
{
	wake_up_bit(word, bit);
}

void wait_on_remotecache_page_metadata(void *word, int bit)
{
	wait_on_bit(word, bit, sleep_on_remotecache_page_metadata,
			TASK_UNINTERRUPTIBLE);
}
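
/*
 * Clear the BUSY tag on @index and wake every thread sleeping on a matching
 * busy_page. Caller must hold inode->lock (see the lockdep assertion).
 */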
void __remotecache_metadata_clear_busy(struct remotecache_inode *inode, pgoff_t index)
{
	unsigned long *entry;
	struct busy_page *bp;

	lockdep_assert_held(&inode->lock);

	smp_mb__before_clear_bit();
	entry = radix_tree_tag_clear(&inode->pages_tree, index,
			REMOTECACHE_TAG_BUSY);
	BUG_ON(!entry);
	smp_mb__after_clear_bit();

	rc_debug("%s inode %lu index %lu value %lx\n", __func__, inode->ino,
			index, (unsigned long)entry);

	rcu_read_lock();
	hash_for_each_possible_rcu(busy_pages_hash, bp, hash, inode->ino ^ index) {
		if (bp->ino == inode->ino && bp->index == index) {
			rc_debug("%s inode %lu index %lu bp %p\n", __func__, inode->ino, index, bp);
			atomic_set(&bp->state, 0);
			/* order the store to ->state before the wake_up() below */
			smp_mb__after_clear_bit();
		}
	}
	rcu_read_unlock();

	wake_up(&wait_busy_page);
}
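
/*
 * Tag @index as BUSY in the inode's page tree. Caller must hold inode->lock.
 */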
void __remotecache_metadata_set_busy(struct remotecache_inode *inode, pgoff_t index)
{
	unsigned long *entry;

	lockdep_assert_held(&inode->lock);

	entry = radix_tree_tag_set(&inode->pages_tree, index, REMOTECACHE_TAG_BUSY);
	rc_debug("%s inode %lu index %lu value %lx\n", __func__, inode->ino,
			index, (unsigned long)entry);
}
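
/*
 * Block until the BUSY tag on @index is cleared. Called locklessly; may
 * sleep. Gives up with a warning if no ack arrives within 5 seconds.
 */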
void remotecache_metadata_wait_busy(struct remotecache_inode *inode, pgoff_t index)
{
	unsigned long flags;
	struct busy_page bp = { .ino = inode->ino,
				.index = index,
				.state = ATOMIC_INIT(1) };

	INIT_HLIST_NODE(&bp.hash);
rcu_read_lock();
if (!radix_tree_tag_get(&inode->pages_tree, index,
REMOTECACHE_TAG_BUSY)) {
rcu_read_unlock();
return;
}
rcu_read_unlock();
	spin_lock_irqsave(&busy_pages_lock, flags);
	hash_add_rcu(busy_pages_hash, &bp.hash, bp.ino ^ bp.index);
	spin_unlock_irqrestore(&busy_pages_lock, flags);

	/*
	 * Concurrent wait busy/clear busy: the BUSY tag may have been cleared
	 * and the waiters woken up before we added bp to the hash table. In
	 * that case nobody will ever wake us, so wait for any in-flight clear
	 * path to finish its RCU traversal, then re-check the tag before
	 * going to sleep.
	 */
	synchronize_rcu();

	rcu_read_lock();
	smp_mb__after_unlock_lock();
	if (!radix_tree_tag_get(&inode->pages_tree, index,
				REMOTECACHE_TAG_BUSY)) {
		rcu_read_unlock();
		goto out;
	}
rcu_read_unlock();
rc_debug("%s wait on inode %lu index %lu bp %p\n", __func__, inode->ino, index, &bp);
	while (atomic_read(&bp.state) != 0) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&wait_busy_page, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp.state) != 0 && !schedule_timeout(5 * HZ)) {
			/* no ack after 5s: warn and give up instead of hanging */
			WARN_ON(true);
			finish_wait(&wait_busy_page, &wait);
			goto out;
		}
		finish_wait(&wait_busy_page, &wait);
	}
out:
	spin_lock_irqsave(&busy_pages_lock, flags);
	hash_del_rcu(&bp.hash);
	spin_unlock_irqrestore(&busy_pages_lock, flags);
	/* bp is on our stack: let concurrent RCU lookups drain before it dies */
	synchronize_rcu();
rc_debug("%s done on inode %lu index %lu bp %p\n", __func__, inode->ino, index, &bp);
}