// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif

/*
 * The mmu_notifier_subscriptions structure is allocated and installed in
 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_subscriptions {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	bool has_itree;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
	unsigned long invalidate_seq;
	unsigned long active_invalidate_ranges;
	struct rb_root_cached itree;
	wait_queue_head_t wq;
	struct hlist_head deferred_list;
};

/*
 * This is a collision-retry read-side/write-side 'lock', a lot like a
 * seqcount, however this allows multiple write-sides to hold it at
 * once. Conceptually the write side is protecting the values of the PTEs in
 * this mm, such that PTEs cannot be read into SPTEs (shadow PTEs) while any
 * writer exists.
 *
 * Note that the core mm creates nested invalidate_range_start()/end() regions
 * within the same thread, and runs invalidate_range_start()/end() in parallel
 * on multiple CPUs. This is designed to not reduce concurrency or block
 * progress on the mm side.
 *
 * As a secondary function, holding the full write side also serves to prevent
 * writers for the itree; this is an optimization to avoid extra locking
 * during invalidate_range_start/end notifiers.
 *
 * The write side has two states, fully excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == True (odd)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is not allowed to change
 *
 * And partially excluded:
 *  - mm->active_invalidate_ranges != 0
 *  - subscriptions->invalidate_seq & 1 == False (even)
 *  - some range on the mm_struct is being invalidated
 *  - the itree is allowed to change
 *
 * Operations on notifier_subscriptions->invalidate_seq (under spinlock):
 *	seq |= 1  # Begin writing
 *	seq++     # Release the writing state
 *	seq & 1   # True if a writer exists
 *
 * For example, an idle seq of 2 becomes 3 when the first range starts
 * invalidating and returns to the even value 4 once the last range ends.
 *
 * The latter (even) state avoids some expensive work on inv_end in the common
 * case of no mmu_interval_notifier monitoring the VA.
 */
static bool
mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
{
	lockdep_assert_held(&subscriptions->lock);
	return subscriptions->invalidate_seq & 1;
}

static struct mmu_interval_notifier *
mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
			 const struct mmu_notifier_range *range,
			 unsigned long *seq)
{
	struct interval_tree_node *node;
	struct mmu_interval_notifier *res = NULL;

	spin_lock(&subscriptions->lock);
	subscriptions->active_invalidate_ranges++;
	node = interval_tree_iter_first(&subscriptions->itree, range->start,
					range->end - 1);
	if (node) {
		subscriptions->invalidate_seq |= 1;
		res = container_of(node, struct mmu_interval_notifier,
				   interval_tree);
	}

	*seq = subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);
	return res;
}

static struct mmu_interval_notifier *
mn_itree_inv_next(struct mmu_interval_notifier *interval_sub,
		  const struct mmu_notifier_range *range)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_next(&interval_sub->interval_tree,
				       range->start, range->end - 1);
	if (!node)
		return NULL;
	return container_of(node, struct mmu_interval_notifier, interval_tree);
}

static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
{
	struct mmu_interval_notifier *interval_sub;
	struct hlist_node *next;

	spin_lock(&subscriptions->lock);
	if (--subscriptions->active_invalidate_ranges ||
	    !mn_itree_is_invalidating(subscriptions)) {
		spin_unlock(&subscriptions->lock);
		return;
	}

	/* Make invalidate_seq even */
	subscriptions->invalidate_seq++;

	/*
	 * The inv_end incorporates a deferred mechanism like rtnl_unlock().
	 * Adds and removes are queued until the final inv_end happens, then
	 * they are progressed. This arrangement for tree updates is used to
	 * avoid using a blocking lock during invalidate_range_start.
	 */
	hlist_for_each_entry_safe(interval_sub, next,
				  &subscriptions->deferred_list,
				  deferred_item) {
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		else
			interval_tree_remove(&interval_sub->interval_tree,
					     &subscriptions->itree);
		hlist_del(&interval_sub->deferred_item);
	}
	spin_unlock(&subscriptions->lock);

	wake_up_all(&subscriptions->wq);
}

/**
 * mmu_interval_read_begin - Begin a read side critical section against a VA
 *                           range
 * @interval_sub: The interval subscription
 *
 * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
 * collision-retry scheme similar to seqcount for the VA range under
 * subscription. If the mm invokes invalidation during the critical section
 * then mmu_interval_read_retry() will return true.
 *
 * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
 * requires a blocking context. The critical region formed by this can sleep,
 * and the required 'user_lock' can also be a sleeping lock.
 *
 * The caller is required to provide a 'user_lock' to serialize both teardown
 * and setup.
 *
 * The return value should be passed to mmu_interval_read_retry().
 */
unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub)
{
	struct mmu_notifier_subscriptions *subscriptions =
		interval_sub->mm->notifier_subscriptions;
	unsigned long seq;
	bool is_invalidating;

	/*
	 * If the subscription has a different seq value under the user_lock
	 * than we started with then it has collided.
	 *
	 * If the subscription currently has the same seq value as the
	 * subscriptions seq, then it is currently between
	 * invalidate_start/end and is colliding.
	 *
	 * The locking looks broadly like this:
	 *    mn_tree_invalidate_start():          mmu_interval_read_begin():
	 *                                         spin_lock
	 *                                          seq = READ_ONCE(interval_sub->invalidate_seq);
	 *                                          seq == subs->invalidate_seq
	 *                                         spin_unlock
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *     op->invalidate_range():
	 *       user_lock
	 *        mmu_interval_set_seq()
	 *         interval_sub->invalidate_seq = seq
	 *       user_unlock
	 *
	 *                          [Required: mmu_interval_read_retry() == true]
	 *
	 *   mn_itree_inv_end():
	 *    spin_lock
	 *     seq = ++subscriptions->invalidate_seq
	 *    spin_unlock
	 *
	 *                                        user_lock
	 *                                         mmu_interval_read_retry():
	 *                                          interval_sub->invalidate_seq != seq
	 *                                        user_unlock
	 *
	 * Barriers are not needed here as any races here are closed by an
	 * eventual mmu_interval_read_retry(), which provides a barrier via the
	 * user_lock.
	 */
	spin_lock(&subscriptions->lock);
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	seq = READ_ONCE(interval_sub->invalidate_seq);
	is_invalidating = seq == subscriptions->invalidate_seq;
	spin_unlock(&subscriptions->lock);

	/*
	 * interval_sub->invalidate_seq must always be set to an odd value via
	 * mmu_interval_set_seq() using the provided cur_seq from
	 * mn_itree_inv_start_range(). This ensures that if seq does wrap we
	 * will always clear the below sleep in some reasonable time as
	 * subscriptions->invalidate_seq is even in the idle state.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (is_invalidating)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/*
	 * Notice that mmu_interval_read_retry() can already be true at this
	 * point, avoiding loops here allows the caller to provide a global
	 * time bound.
	 */

	return seq;
}
EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
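
/*
 * Usage sketch, not part of this file: a typical reader pairs
 * mmu_interval_read_begin()/mmu_interval_read_retry() with its own
 * sleepable 'user_lock'. 'ctx', its 'notifier' member and
 * my_install_sptes() are hypothetical names:
 *
 *	unsigned long seq;
 *
 *again:
 *	seq = mmu_interval_read_begin(&ctx->notifier);
 *	// ... fault pages / compute the new SPTE values, may sleep ...
 *	mutex_lock(&ctx->user_lock);
 *	if (mmu_interval_read_retry(&ctx->notifier, seq)) {
 *		// collided with an invalidation, start over
 *		mutex_unlock(&ctx->user_lock);
 *		goto again;
 *	}
 *	my_install_sptes(ctx);		// now safe to publish the SPTEs
 *	mutex_unlock(&ctx->user_lock);
 */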

static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier_range range = {
		.flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
		.event = MMU_NOTIFY_RELEASE,
		.mm = mm,
		.start = 0,
		.end = ULONG_MAX,
	};
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;
	bool ret;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, &range)) {
		ret = interval_sub->ops->invalidate(interval_sub, &range,
						    cur_seq);
		WARN_ON(!ret);
	}

	mn_itree_inv_end(subscriptions);
}

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the notifier_subscriptions->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
			     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu))
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);

	spin_lock(&subscriptions->lock);
	while (unlikely(!hlist_empty(&subscriptions->list))) {
		subscription = hlist_entry(subscriptions->list.first,
					   struct mmu_notifier, hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&subscription->hlist);
	}
	spin_unlock(&subscriptions->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The notifier_subscriptions can't go away from under us because
	 * one mm_count is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;

	if (subscriptions->has_itree)
		mn_itree_release(subscriptions, mm);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_release(subscriptions, mm);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_flush_young)
			young |= subscription->ops->clear_flush_young(
				subscription, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->clear_young)
			young |= subscription->ops->clear_young(subscription,
								mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *subscription;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->test_young) {
			young = subscription->ops->test_young(subscription, mm,
							      address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->change_pte)
			subscription->ops->change_pte(subscription, mm, address,
						      pte);
	}
	srcu_read_unlock(&srcu, id);
}

static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
			       const struct mmu_notifier_range *range)
{
	struct mmu_interval_notifier *interval_sub;
	unsigned long cur_seq;

	for (interval_sub =
		     mn_itree_inv_start_range(subscriptions, range, &cur_seq);
	     interval_sub;
	     interval_sub = mn_itree_inv_next(interval_sub, range)) {
		bool ret;

		ret = interval_sub->ops->invalidate(interval_sub, range,
						    cur_seq);
		if (!ret) {
			if (WARN_ON(mmu_notifier_range_blockable(range)))
				continue;
			goto out_would_block;
		}
	}
	return 0;

out_would_block:
	/*
	 * On -EAGAIN the non-blocking caller is not allowed to call
	 * invalidate_range_end()
	 */
	mn_itree_inv_end(subscriptions);
	return -EAGAIN;
}

static int mn_hlist_invalidate_range_start(
	struct mmu_notifier_subscriptions *subscriptions,
	struct mmu_notifier_range *range)
{
	struct mmu_notifier *subscription;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		const struct mmu_notifier_ops *ops = subscription->ops;

		if (ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = ops->invalidate_range_start(subscription, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ?
						"non-" :
						"");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;
	int ret;

	if (subscriptions->has_itree) {
		ret = mn_itree_invalidate(subscriptions, range);
		if (ret)
			return ret;
	}
	if (!hlist_empty(&subscriptions->list))
		return mn_hlist_invalidate_range_start(subscriptions, range);
	return 0;
}
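
/*
 * Illustrative sketch only, not part of this file: a core-mm caller
 * brackets its PTE updates with a start/end pair built from a
 * mmu_notifier_range. The vma/mm/start/end values stand in for the
 * caller's own context:
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	// ... modify or zap the PTEs under the page table locks ...
 *	mmu_notifier_invalidate_range_end(&range);
 */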

static void
mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
			struct mmu_notifier_range *range, bool only_end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		/*
		 * Call invalidate_range here too to avoid the need for the
		 * subsystem to register an invalidate_range_end
		 * call-back when there is invalidate_range already. Usually a
		 * subsystem registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this will be no additional overhead
		 * (besides the pointer check).
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, i.e. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is safe to
		 * do when we know that a call to invalidate_range() already
		 * happened under the page table lock.
		 */
		if (!only_end && subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription,
							    range->mm,
							    range->start,
							    range->end);
		if (subscription->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			subscription->ops->invalidate_range_end(subscription,
								range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier_subscriptions *subscriptions =
		range->mm->notifier_subscriptions;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (subscriptions->has_itree)
		mn_itree_inv_end(subscriptions);

	if (!hlist_empty(&subscriptions->list))
		mn_hlist_invalidate_end(subscriptions, range, only_end);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *subscription;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 srcu_read_lock_held(&srcu)) {
		if (subscription->ops->invalidate_range)
			subscription->ops->invalidate_range(subscription, mm,
							    start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the mmap_sem in
 * write mode. A NULL subscription signals the notifier is being registered
 * for itree mode.
 */
int __mmu_notifier_register(struct mmu_notifier *subscription,
			    struct mm_struct *mm)
{
	struct mmu_notifier_subscriptions *subscriptions = NULL;
	int ret;

	mmap_assert_write_locked(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
		fs_reclaim_release(GFP_KERNEL);
	}

	if (!mm->notifier_subscriptions) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->notifier_subscriptions can't change while we
		 * hold the write side of the mmap_sem.
		 */
		subscriptions = kzalloc(
			sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
		if (!subscriptions)
			return -ENOMEM;

		INIT_HLIST_HEAD(&subscriptions->list);
		spin_lock_init(&subscriptions->lock);
		subscriptions->invalidate_seq = 2;
		subscriptions->itree = RB_ROOT_CACHED;
		init_waitqueue_head(&subscriptions->wq);
		INIT_HLIST_HEAD(&subscriptions->deferred_list);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 *
	 * release semantics on the initialization of the
	 * mmu_notifier_subscriptions's contents are provided for unlocked
	 * readers. acquire can only be used while holding the mmgrab or
	 * mmget, and is safe because once created the
	 * mmu_notifier_subscriptions is not freed until the mm is destroyed.
	 * As above, users holding the mmap_sem or one of the
	 * mm_take_all_locks() do not need to use acquire semantics.
	 */
	if (subscriptions)
		smp_store_release(&mm->notifier_subscriptions, subscriptions);

	if (subscription) {
		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
		mmgrab(mm);
		subscription->mm = mm;
		subscription->users = 1;

		spin_lock(&mm->notifier_subscriptions->lock);
		hlist_add_head_rcu(&subscription->hlist,
				   &mm->notifier_subscriptions->list);
		spin_unlock(&mm->notifier_subscriptions->lock);
	} else
		mm->notifier_subscriptions->has_itree = true;

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(subscriptions);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @subscription: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must be always called to
 * unregister the notifier.
 *
 * While the caller has a mmu_notifier get, the subscription->mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *subscription,
			  struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = __mmu_notifier_register(subscription, mm);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
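
/*
 * Usage sketch, not part of this file: a minimal subscriber embeds a
 * struct mmu_notifier in its own context, fills in only the ops it
 * needs and registers against a pinned mm. 'struct my_ctx' and
 * my_mn_release() are hypothetical names:
 *
 *	static void my_mn_release(struct mmu_notifier *subscription,
 *				  struct mm_struct *mm)
 *	{
 *		// last chance to tear down any secondary TLBs/sptes
 *	}
 *
 *	static const struct mmu_notifier_ops my_mn_ops = {
 *		.release = my_mn_release,
 *	};
 *
 *	ctx->mn.ops = &my_mn_ops;
 *	ret = mmu_notifier_register(&ctx->mn, current->mm);
 *	...
 *	mmu_notifier_unregister(&ctx->mn, mm);	// when done
 */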

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *subscription;

	spin_lock(&mm->notifier_subscriptions->lock);
	hlist_for_each_entry_rcu(subscription,
				 &mm->notifier_subscriptions->list, hlist,
				 lockdep_is_held(&mm->notifier_subscriptions->lock)) {
		if (subscription->ops != ops)
			continue;

		if (likely(subscription->users != UINT_MAX))
			subscription->users++;
		else
			subscription = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->notifier_subscriptions->lock);
		return subscription;
	}
	spin_unlock(&mm->notifier_subscriptions->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_sem.
 *
 * While the caller has a mmu_notifier get the mm pointer will remain valid,
 * and can be converted to an active mm pointer via mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *subscription;
	int ret;

	mmap_assert_write_locked(mm);

	if (mm->notifier_subscriptions) {
		subscription = find_get_mmu_notifier(mm, ops);
		if (subscription)
			return subscription;
	}

	subscription = ops->alloc_notifier(mm);
	if (IS_ERR(subscription))
		return subscription;
	subscription->ops = ops;
	ret = __mmu_notifier_register(subscription, mm);
	if (ret)
		goto out_free;
	return subscription;
out_free:
	subscription->ops->free_notifier(subscription);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list));
	kfree(mm->notifier_subscriptions);
	mm->notifier_subscriptions = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *subscription,
			     struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&subscription->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (subscription->ops->release)
			subscription->ops->release(subscription, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->notifier_subscriptions->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&subscription->hlist);
		spin_unlock(&mm->notifier_subscriptions->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *subscription =
		container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = subscription->mm;

	subscription->ops->free_notifier(subscription);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @subscription: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(), it releases the
 * reference obtained by the get. If this is the last reference then the
 * process to free the notifier will be run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *subscription)
{
	struct mm_struct *mm = subscription->mm;

	spin_lock(&mm->notifier_subscriptions->lock);
	if (WARN_ON(!subscription->users) || --subscription->users)
		goto out_unlock;
	hlist_del_init_rcu(&subscription->hlist);
	spin_unlock(&mm->notifier_subscriptions->lock);

	call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->notifier_subscriptions->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);
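
/*
 * Usage sketch for the get/put flow, not part of this file: here the ops
 * must also provide alloc_notifier()/free_notifier(), and 'my_mn_ops' is
 * a hypothetical name. Each successful get is paired with exactly one
 * put, and the module's __exit must drain the async frees:
 *
 *	subscription = mmu_notifier_get(&my_mn_ops, current->mm);
 *	if (IS_ERR(subscription))
 *		return PTR_ERR(subscription);
 *	// ... establish and eventually drop all sptes ...
 *	mmu_notifier_put(subscription);
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		mmu_notifier_synchronize();	// wait for free_notifier()
 *	}
 */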

static int __mmu_interval_notifier_insert(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
	unsigned long length, const struct mmu_interval_notifier_ops *ops)
{
	interval_sub->mm = mm;
	interval_sub->ops = ops;
	RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
	interval_sub->interval_tree.start = start;
	/*
	 * Note that the representation of the intervals in the interval tree
	 * considers the ending point as contained in the interval.
	 */
	if (length == 0 ||
	    check_add_overflow(start, length - 1,
			       &interval_sub->interval_tree.last))
		return -EOVERFLOW;

	/* Must call with a mmget() held */
	if (WARN_ON(atomic_read(&mm->mm_count) <= 0))
		return -EINVAL;

	/* pairs with mmdrop in mmu_interval_notifier_remove() */
	mmgrab(mm);

	/*
	 * If some invalidate_range_start/end region is going on in parallel
	 * we don't know what VA ranges are affected, so we must assume this
	 * new range is included.
	 *
	 * If the itree is invalidating then we are not allowed to change
	 * it. Retrying until invalidation is done is tricky due to the
	 * possibility for live lock, instead defer the add to
	 * mn_itree_inv_end() so this algorithm is deterministic.
	 *
	 * In all cases the value for the interval_sub->invalidate_seq should be
	 * odd, see mmu_interval_read_begin()
	 */
	spin_lock(&subscriptions->lock);
	if (subscriptions->active_invalidate_ranges) {
		if (mn_itree_is_invalidating(subscriptions))
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
		else {
			subscriptions->invalidate_seq |= 1;
			interval_tree_insert(&interval_sub->interval_tree,
					     &subscriptions->itree);
		}
		interval_sub->invalidate_seq = subscriptions->invalidate_seq;
	} else {
		WARN_ON(mn_itree_is_invalidating(subscriptions));
		/*
		 * The starting seq for a subscription not under invalidation
		 * should be odd, not equal to the current invalidate_seq, and
		 * invalidate_seq should not 'wrap' to the new seq any time
		 * soon.
		 */
		interval_sub->invalidate_seq =
			subscriptions->invalidate_seq - 1;
		interval_tree_insert(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);
	return 0;
}

/**
 * mmu_interval_notifier_insert - Insert an interval notifier
 * @interval_sub: Interval subscription to register
 * @start: Starting virtual address to monitor
 * @length: Length of the range to monitor
 * @mm: mm_struct to attach to
 * @ops: Interval notifier operations to be called on matching events
 *
 * This function subscribes the interval notifier for notifications from the
 * mm. Upon return the ops related to mmu_interval_notifier will be called
 * whenever an event that intersects with the given range occurs.
 *
 * Upon return the interval subscription may not be present in the interval
 * tree yet. The caller must use the normal interval notifier read flow via
 * mmu_interval_read_begin() to establish SPTEs for this range.
 */
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions;
	int ret;

	might_lock(&mm->mmap_sem);

	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
	if (!subscriptions || !subscriptions->has_itree) {
		ret = mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
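
/*
 * Usage sketch, not part of this file: an interval subscriber provides an
 * invalidate() callback that takes the driver's 'user_lock', publishes the
 * new sequence with mmu_interval_set_seq() and tears down the affected
 * SPTEs. 'struct my_ctx' and my_invalidate() are hypothetical names:
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		struct my_ctx *ctx =
 *			container_of(interval_sub, struct my_ctx, notifier);
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return false;	// cannot sleep, ask for -EAGAIN
 *		mutex_lock(&ctx->user_lock);
 *		mmu_interval_set_seq(interval_sub, cur_seq);
 *		// ... zap SPTEs covering range->start .. range->end - 1 ...
 *		mutex_unlock(&ctx->user_lock);
 *		return true;
 *	}
 *
 *	static const struct mmu_interval_notifier_ops my_itree_ops = {
 *		.invalidate = my_invalidate,
 *	};
 *
 *	ret = mmu_interval_notifier_insert(&ctx->notifier, current->mm,
 *					   start, length, &my_itree_ops);
 */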

int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops)
{
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	int ret;

	mmap_assert_write_locked(mm);

	if (!subscriptions || !subscriptions->has_itree) {
		ret = __mmu_notifier_register(NULL, mm);
		if (ret)
			return ret;
		subscriptions = mm->notifier_subscriptions;
	}
	return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
					      start, length, ops);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);

/**
 * mmu_interval_notifier_remove - Remove an interval notifier
 * @interval_sub: Interval subscription to unregister
 *
 * This function must be paired with mmu_interval_notifier_insert(). It cannot
 * be called from any ops callback.
 *
 * Once this returns ops callbacks are no longer running on other CPUs and
 * will not be called in the future.
 */
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
{
	struct mm_struct *mm = interval_sub->mm;
	struct mmu_notifier_subscriptions *subscriptions =
		mm->notifier_subscriptions;
	unsigned long seq = 0;

	might_sleep();

	spin_lock(&subscriptions->lock);
	if (mn_itree_is_invalidating(subscriptions)) {
		/*
		 * remove is being called after insert put this subscription
		 * on the deferred list, but before the deferred list was
		 * processed.
		 */
		if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
			hlist_del(&interval_sub->deferred_item);
		} else {
			hlist_add_head(&interval_sub->deferred_item,
				       &subscriptions->deferred_list);
			seq = subscriptions->invalidate_seq;
		}
	} else {
		WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
		interval_tree_remove(&interval_sub->interval_tree,
				     &subscriptions->itree);
	}
	spin_unlock(&subscriptions->lock);

	/*
	 * The possible sleep on progress in the invalidation requires the
	 * caller not hold any locks held by invalidation callbacks.
	 */
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	if (seq)
		wait_event(subscriptions->wq,
			   READ_ONCE(subscriptions->invalidate_seq) != seq);

	/* pairs with mmgrab in mmu_interval_notifier_insert() */
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before using the caller must ensure that all of its mmu_notifiers have been
 * fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;
	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);