/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/*
 * global SRCU for all MMs: read-side sections cover every notifier
 * callout; synchronize_srcu() is what unregister/release wait on.
 */
static struct srcu_struct srcu;

Andrea Arcangeli's avatar
Andrea Arcangeli committed
25
26
27
28
29
30
31
/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
32
33
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
Andrea Arcangeli's avatar
Andrea Arcangeli committed
34
35
36
37
38
39
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
40
	struct hlist_node *n;
41
	int id;
42
43

	/*
44
	 * SRCU here will block mmu_notifier_unregister until
45
46
	 * ->release returns.
	 */
47
	id = srcu_read_lock(&srcu);
48
49
50
51
52
53
54
55
56
57
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * if ->release runs before mmu_notifier_unregister it
		 * must be handled as it's the only way for the driver
		 * to flush all existing sptes and stop the driver
		 * from establishing any more sptes before all the
		 * pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
58
	srcu_read_unlock(&srcu, id);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than
		 * to wait ->release to finish and
		 * mmu_notifier_unregister to return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
76
	 * synchronize_srcu here prevents mmu_notifier_release to
Andrea Arcangeli's avatar
Andrea Arcangeli committed
77
78
79
80
81
82
83
	 * return to exit_mmap (which would proceed freeing all pages
	 * in the mm) until the ->release method returns, if it was
	 * invoked by mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one
	 * mm_count is hold by exit_mmap.
	 */
84
	synchronize_srcu(&srcu);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
85
86
87
88
89
90
91
92
93
94
95
96
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending if the mapping previously
 * existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
97
	int young = 0, id;
Andrea Arcangeli's avatar
Andrea Arcangeli committed
98

99
	id = srcu_read_lock(&srcu);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
100
101
102
103
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
104
	srcu_read_unlock(&srcu, id);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
105
106
107
108

	return young;
}

109
110
111
112
113
int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
114
	int young = 0, id;
115

116
	id = srcu_read_lock(&srcu);
117
118
119
120
121
122
123
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
124
	srcu_read_unlock(&srcu, id);
125
126
127
128

	return young;
}

129
130
131
132
133
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
134
	int id;
135

136
	id = srcu_read_lock(&srcu);
137
138
139
140
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
141
	srcu_read_unlock(&srcu, id);
142
143
}

Andrea Arcangeli's avatar
Andrea Arcangeli committed
144
145
146
147
148
void __mmu_notifier_invalidate_page(struct mm_struct *mm,
					  unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
149
	int id;
Andrea Arcangeli's avatar
Andrea Arcangeli committed
150

151
	id = srcu_read_lock(&srcu);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
152
153
154
155
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
156
	srcu_read_unlock(&srcu, id);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
157
158
159
160
161
162
163
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
164
	int id;
Andrea Arcangeli's avatar
Andrea Arcangeli committed
165

166
	id = srcu_read_lock(&srcu);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
167
168
169
170
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
171
	srcu_read_unlock(&srcu, id);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
172
173
174
175
176
177
178
}

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
179
	int id;
Andrea Arcangeli's avatar
Andrea Arcangeli committed
180

181
	id = srcu_read_lock(&srcu);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
182
183
184
185
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
186
	srcu_read_unlock(&srcu, id);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
187
188
189
190
191
192
193
194
195
196
197
}

/*
 * Common backend for mmu_notifier_register() and __mmu_notifier_register().
 * Allocates mm->mmu_notifier_mm on first use, pins mm_count, and links @mn
 * onto the per-mm notifier list under mm_take_all_locks().
 * @take_mmap_sem: nonzero if this function must acquire mm->mmap_sem itself.
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * mm_take_all_locks().
 */
static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() already run and the global srcu is
	 * initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	/* allocate speculatively, before knowing whether mm already has one */
	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		/* ownership transferred to mm; don't free it below */
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	/* kfree(NULL) is a no-op: only frees if the allocation wasn't adopted */
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must be always called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 *
 * Returns 0 on success or a negative error from
 * do_mmu_notifier_register().
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* take_mmap_sem == 1: this variant acquires mmap_sem itself */
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* take_mmap_sem == 0: caller already holds mmap_sem for writing */
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	/* every notifier must already have been unlinked by now */
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug: trap use-after-destroy */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
286
287
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
Andrea Arcangeli's avatar
Andrea Arcangeli committed
288
289
290
291
292
293
294
295
296
297
298
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
299
		 * SRCU here will force exit_mmap to wait ->release to finish
Andrea Arcangeli's avatar
Andrea Arcangeli committed
300
301
		 * before freeing the pages.
		 */
302
		int id;
303

304
		id = srcu_read_lock(&srcu);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
305
306
307
308
309
310
311
		/*
		 * exit_mmap will block in mmu_notifier_release to
		 * guarantee ->release is called before freeing the
		 * pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
312
		srcu_read_unlock(&srcu, id);
313
314
315

		spin_lock(&mm->mmu_notifier_mm->lock);
		hlist_del_rcu(&mn->hlist);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
316
		spin_unlock(&mm->mmu_notifier_mm->lock);
317
	}
Andrea Arcangeli's avatar
Andrea Arcangeli committed
318
319
320
321
322

	/*
	 * Wait any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_relase instead of us.
	 */
323
	synchronize_srcu(&srcu);
Andrea Arcangeli's avatar
Andrea Arcangeli committed
324
325
326
327
328
329

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
/* initialize the global SRCU domain before any notifier can register */
static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}

module_init(mmu_notifier_init);