// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount, however this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree; this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - subscriptions->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - subscriptions->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *	seq |= 1  # Begin writing
 *	seq++     # Release the writing state
 *	seq & 1   # True if a writer exists
 *
 * The latter state avoids some expensive work on inv_end in the common case
 * of no mmu_interval_notifier monitoring the VA.
 */
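/*
 * Illustrative worked example (an assumption for exposition, not upstream
 * documentation): one possible invalidate_seq trace across a single
 * invalidation cycle, starting from an assumed idle value of 6:
 *
 *	idle:                        invalidate_seq == 6 (even, no writer)
 *	range_start hits a
 *	monitored range:             seq |= 1  -> 7      (odd, itree frozen)
 *	nested/parallel starts:      seq stays 7; active_invalidate_ranges
 *	                             counts the open regions
 *	final range_end:             seq++     -> 8      (even; deferred itree
 *	                             adds/removes are applied, waiters woken)
 */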
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * require a blocking context. The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has a different seq value under the user_lock
	 * than we started with then it has collided.
	 *
	 * If the subscription currently has the same seq value as the
	 * subscriptions seq, then it is currently between
	 * invalidate_start/end and is colliding.
	 *
	 * The locking looks broadly like this:
	 *   mn_tree_invalidate_start():        mmu_interval_read_begin():
	 *                                       spin_lock
	 *                                        seq = READ_ONCE(interval_sub->invalidate_seq);
	 *                                        seq == subs->invalidate_seq
	 *                                       spin_unlock
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *    op->invalidate_range():
	 *      user_lock
	 *       mmu_interval_set_seq()
	 *        interval_sub->invalidate_seq = seq
	 *      user_unlock
	 *
	 *                 [Required: mmu_interval_read_retry() == true]
	 *
	 *   mn_itree_inv_end():
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *
	 *                                      user_lock
	 *                                       mmu_interval_read_retry():
	 *                                        interval_sub->invalidate_seq != seq
	 *                                      user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * interval_sub->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point, avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
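
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the collision-retry loop a driver builds around mmu_interval_read_begin()
 * and mmu_interval_read_retry(). 'my_sub', 'driver_lock', my_collect_ptes()
 * and my_program_sptes() are assumed names used only for this example; the
 * mutex plays the role of the 'user_lock' described above.
 *
 *	unsigned long seq;
 *
 *again:
 *	seq = mmu_interval_read_begin(&my_sub);
 *
 *	// Read the PTEs; this may sleep and fault pages in.
 *	down_read(&mm->mmap_sem);
 *	my_collect_ptes(mm, start, end);
 *	up_read(&mm->mmap_sem);
 *
 *	// Publish SPTEs only if no invalidation collided with the read.
 *	mutex_lock(&driver_lock);
 *	if (mmu_interval_read_retry(&my_sub, seq)) {
 *		mutex_unlock(&driver_lock);
 *		goto again;
 *	}
 *	my_program_sptes(start, end);
 *	mutex_unlock(&driver_lock);
 */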

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The notifier_subscriptions can't go away from under us because
	 * one mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->change_pte)
			subscription->ops->change_pte(subscription, mm, address,
						      pte);
	}
	srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" :
						"");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range, bool only_end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		/*
		 * Call invalidate_range here too, to avoid the need for the
		 * subsystem to register an invalidate_range_end callback
		 * when there is an invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end()
		 * or invalidate_range(), so this will be no additional
		 * overhead (besides the pointer check).
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, i.e. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is safe to
		 * do when we know that a call to invalidate_range() already
		 * happened under the page table lock.
		 */
		if (!only_end && subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription,
							    range->mm,
							    range->start,
							    range->end);
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription, mm,
							    start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_sem in
 * write mode. A NULL subscription signals the notifier is being registered
 * for itree mode.
 */
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
		fs_reclaim_release(GFP_KERNEL);
	}

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_sem.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 *
	 * release semantics on the initialization of the
	 * mmu_notifier_subscriptions's contents are provided for unlocked
	 * readers. acquire can only be used while holding the mmgrab or
	 * mmget, and is safe because once created the
	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
	 * As above, users holding the mmap_sem or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (subscription) {
		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 *
 * While the caller holds a mmu_notifier get, the subscription->mm pointer
 * will remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = __mmu_notifier_register(subscription, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
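
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * minimal registration of a classic hlist-based notifier. 'my_mn',
 * 'my_mn_ops' and my_release() are assumed names; the caller must hold an
 * mm_users reference as documented above.
 *
 *	static void my_release(struct mmu_notifier *subscription,
 *			       struct mm_struct *mm)
 *	{
 *		// Flush all SPTEs; the mm is about to free its pages.
 *	}
 *
 *	static const struct mmu_notifier_ops my_mn_ops = {
 *		.release = my_release,
 *	};
 *
 *	static struct mmu_notifier my_mn = { .ops = &my_mn_ops };
 *
 *	// e.g. from an ioctl, where mm == current->mm pins mm_users:
 *	ret = mmu_notifier_register(&my_mn, current->mm);
 */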

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_sem.
 *
 * While the caller holds a mmu_notifier get, the mm pointer will remain
 * valid, and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(), it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);
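
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the get/put lifecycle. 'struct my_notifier' and 'my_ops' are assumed
 * names; ops->alloc_notifier() and ops->free_notifier() are required for
 * this flow.
 *
 *	struct my_notifier {
 *		struct mmu_notifier mn;
 *		// driver-private state ...
 *	};
 *
 *	struct mmu_notifier *mn;
 *	struct my_notifier *p;
 *
 *	mn = mmu_notifier_get(&my_ops, current->mm);	// takes mmap_sem
 *	if (IS_ERR(mn))
 *		return PTR_ERR(mn);
 *	p = container_of(mn, struct my_notifier, mn);
 *
 *	// ... use p, then drop the reference; the last put frees it
 *	// asynchronously via SRCU and ops->free_notifier():
 *	mmu_notifier_put(&p->mn);
 */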

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_count) <= 0))
		return -EINVAL;

	/* pairs with mmdrop in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility of live lock; instead defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the interval_sub->invalidate_seq should be
	 * odd, see mmu_interval_read_begin()
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd, not equal to the current invalidate_seq and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @mm: mm_struct to attach to
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the range_notifier may not be present in the interval tree yet.
 * The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	might_lock(&mm->mmap_sem);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
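
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * subscribing a VA range and handling invalidation. my_invalidate(),
 * 'my_itree_ops', 'my_sub' and 'driver_lock' are assumed names; the
 * trylock handles the non-blockable case by asking the core to retry
 * with a blockable range, as mn_itree_invalidate() above expects.
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&driver_lock);
 *		else if (!mutex_trylock(&driver_lock))
 *			return false;
 *		mmu_interval_set_seq(interval_sub, cur_seq);
 *		// shoot down SPTEs covering range->start .. range->end - 1
 *		mutex_unlock(&driver_lock);
 *		return true;
 *	}
 *
 *	static const struct mmu_interval_notifier_ops my_itree_ops = {
 *		.invalidate = my_invalidate,
 *	};
 *
 *	// mm_users must be held, e.g. for current->mm:
 *	ret = mmu_interval_notifier_insert(&my_sub, current->mm, start,
 *					   length, &my_itree_ops);
 */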

int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 *
 * Once this returns ops callbacks are no longer running on other CPUs and
 * will not be called in future.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
	struct mm_struct *mm = interval_sub->mm;
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&subscriptions->lock);
	if (mn_itree_is_invalidating(subscriptions)) {
		/*
		 * remove is being called after insert put this on the
		 * deferred list, but before the deferred list was processed.
		 */
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
			hlist_del(&interval_sub->deferred_item);
		} else {
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
			seq = subscriptions->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
		interval_tree_remove(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);

	/*
	 * The possible sleep on progress in the invalidation requires the
	 * caller not hold any locks held by invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/* pairs with mmgrab in mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before using the caller must ensure that all of its mmu_notifiers have been
 * fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
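
/*
 * Illustrative sketch (hypothetical module code, not part of this file):
 * a module using mmu_notifier_put() flushes the asynchronous frees in its
 * __exit path before the ops->free_notifier() code can be unloaded.
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		// ... tear down devices, drop all notifier references ...
 *		mmu_notifier_synchronize();
 *	}
 *	module_exit(my_driver_exit);
 */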

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;
	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);