// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/mmu_notifier.c
 *
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright (C) 2008 SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

#ifdef CONFIG_LOCKDEP
struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
	.name = "mmu_notifier_invalidate_range_start"
};
#endif
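
/*
 * Note: the lockdep map above is not a real lock. It is acquired and
 * released around __mmu_notifier_invalidate_range_end() and primed from
 * __mmu_notifier_register() (see below) so that lockdep can flag locks
 * that could deadlock against notifier invalidation callbacks.
 */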

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start) {
			int _ret;

			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			_ret = mn->ops->invalidate_range_start(mn, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					mn->ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ? "non-" : "");
				WARN_ON(mmu_notifier_range_blockable(range) ||
					_ret != -EAGAIN);
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range() here too, so that a subsystem does
		 * not need to register an invalidate_range_end() callback
		 * when it already registers invalidate_range(). Usually a
		 * subsystem registers either invalidate_range_start()/end()
		 * or invalidate_range(), so this adds no overhead beyond the
		 * pointer check.
		 *
		 * The call to invalidate_range() is skipped when we know it
		 * is safe, i.e. when the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is correct
		 * when a call to invalidate_range() already happened under
		 * the page table lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, range->mm,
						  range->start,
						  range->end);
		if (mn->ops->invalidate_range_end) {
			if (!mmu_notifier_range_blockable(range))
				non_block_start();
			mn->ops->invalidate_range_end(mn, range);
			if (!mmu_notifier_range_blockable(range))
				non_block_end();
		}
	}
	srcu_read_unlock(&srcu, id);
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct mmu_notifier_mm *mmu_notifier_mm = NULL;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

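	/*
	 * Prime lockdep: pretend we are in reclaim and take the
	 * invalidate_range_start map once, so that locking problems between
	 * memory reclaim and the notifier callbacks are reported at
	 * registration time rather than only when reclaim first fires.
	 */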
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
		fs_reclaim_release(GFP_KERNEL);
	}

	mn->mm = mm;
	mn->users = 1;

	if (!mm->mmu_notifier_mm) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->mmu_notifier_mm can't change while we hold
		 * the write side of the mmap_sem.
		 */
		mmu_notifier_mm =
			kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
		if (!mmu_notifier_mm)
			return -ENOMEM;

		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/* Pairs with the mmdrop in mmu_notifier_unregister_* */
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	if (mmu_notifier_mm)
		mm->mmu_notifier_mm = mmu_notifier_mm;

	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(mmu_notifier_mm);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/**
 * mmu_notifier_register - Register a notifier on a mm
 * @mn: The notifier to attach
 * @mm: The mm to attach the notifier to
 *
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns.
 *
 * mmu_notifier_unregister() or mmu_notifier_put() must always be called to
 * unregister the notifier.
 *
 * While the caller holds a get on the mmu_notifier, the mn->mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = __mmu_notifier_register(mn, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
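
/*
 * Example usage (an illustrative sketch only, using hypothetical "my_*"
 * names; it is not part of this file). A driver that mirrors CPU page
 * tables typically wires up a few callbacks and registers against the mm
 * it wants to track:
 *
 *	static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
 *	{
 *		// Tear down all secondary mappings (sptes) for this mm.
 *	}
 *
 *	static int my_invalidate_range_start(struct mmu_notifier *mn,
 *				const struct mmu_notifier_range *range)
 *	{
 *		// Shoot down secondary mappings covering
 *		// [range->start, range->end).
 *		return 0;
 *	}
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *	};
 *
 *	// my_mn is a driver-owned struct mmu_notifier:
 *	my_mn.ops = &my_ops;
 *	ret = mmu_notifier_register(&my_mn, current->mm);
 *	...
 *	mmu_notifier_unregister(&my_mn, current->mm);
 */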

static struct mmu_notifier *
find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
{
	struct mmu_notifier *mn;

	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops != ops)
			continue;

		if (likely(mn->users != UINT_MAX))
			mn->users++;
		else
			mn = ERR_PTR(-EOVERFLOW);
		spin_unlock(&mm->mmu_notifier_mm->lock);
		return mn;
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	return NULL;
}

/**
 * mmu_notifier_get_locked - Return the single struct mmu_notifier for
 *                           the mm & ops
 * @ops: The operations struct being subscribed with
 * @mm: The mm to attach notifiers to
 *
 * This function either allocates a new mmu_notifier via
 * ops->alloc_notifier(), or returns an already existing notifier on the
 * list. The value of the ops pointer is used to determine when two notifiers
 * are the same.
 *
 * Each call to mmu_notifier_get() must be paired with a call to
 * mmu_notifier_put(). The caller must hold the write side of mm->mmap_sem.
 *
 * While the caller holds a get on the mmu_notifier, the mm pointer will
 * remain valid, and can be converted to an active mm pointer via
 * mmget_not_zero().
 */
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);

	if (mm->mmu_notifier_mm) {
		mn = find_get_mmu_notifier(mm, ops);
		if (mn)
			return mn;
	}

	mn = ops->alloc_notifier(mm);
	if (IS_ERR(mn))
		return mn;
	mn->ops = ops;
	ret = __mmu_notifier_register(mn, mm);
	if (ret)
		goto out_free;
	return mn;
out_free:
	mn->ops->free_notifier(mn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(mmu_notifier_get_locked);
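
/*
 * Example of the get/put flow (an illustrative sketch with hypothetical
 * "my_*" names, not part of this file). Users of mmu_notifier_get_locked()
 * embed the struct mmu_notifier in their own per-mm object and supply
 * alloc_notifier()/free_notifier() in their ops:
 *
 *	struct my_notifier {
 *		struct mmu_notifier mn;
 *		// driver private state ...
 *	};
 *
 *	static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
 *	{
 *		struct my_notifier *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *		return p ? &p->mn : ERR_PTR(-ENOMEM);
 *	}
 *
 *	static void my_free_notifier(struct mmu_notifier *mn)
 *	{
 *		kfree(container_of(mn, struct my_notifier, mn));
 *	}
 *
 *	// With my_ops containing the two callbacks above, and while
 *	// holding mm->mmap_sem for write:
 *	mn = mmu_notifier_get_locked(&my_ops, current->mm);
 *	...
 *	mmu_notifier_put(mn);
 */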

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

static void mmu_notifier_free_rcu(struct rcu_head *rcu)
{
	struct mmu_notifier *mn = container_of(rcu, struct mmu_notifier, rcu);
	struct mm_struct *mm = mn->mm;

	mn->ops->free_notifier(mn);
	/* Pairs with the get in __mmu_notifier_register() */
	mmdrop(mm);
}

/**
 * mmu_notifier_put - Release the reference on the notifier
 * @mn: The notifier to act on
 *
 * This function must be paired with each mmu_notifier_get(); it releases the
 * reference obtained by the get. If this is the last reference then the
 * process of freeing the notifier will be run asynchronously.
 *
 * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release
 * when the mm_struct is destroyed. Instead free_notifier is always called to
 * release any resources held by the user.
 *
 * As ops->release is not guaranteed to be called, the user must ensure that
 * all sptes are dropped, and no new sptes can be established before
 * mmu_notifier_put() is called.
 *
 * This function can be called from the ops->release callback, however the
 * caller must still ensure it is called pairwise with mmu_notifier_get().
 *
 * Modules calling this function must call mmu_notifier_synchronize() in
 * their __exit functions to ensure the async work is completed.
 */
void mmu_notifier_put(struct mmu_notifier *mn)
{
	struct mm_struct *mm = mn->mm;

	spin_lock(&mm->mmu_notifier_mm->lock);
	if (WARN_ON(!mn->users) || --mn->users)
		goto out_unlock;
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	call_srcu(&srcu, &mn->rcu, mmu_notifier_free_rcu);
	return;

out_unlock:
	spin_unlock(&mm->mmu_notifier_mm->lock);
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);
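
/*
 * Illustrative sketch (hypothetical names, not part of this file): modules
 * using the get/put flow pair their final mmu_notifier_put() with
 * mmu_notifier_synchronize() in the __exit path, so that the asynchronous
 * free_notifier() work has finished before the module text goes away:
 *
 *	static void __exit my_exit(void)
 *	{
 *		// All notifiers have already been released with
 *		// mmu_notifier_put() by this point.
 *		mmu_notifier_synchronize();
 *	}
 */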

/**
 * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
 *
 * This function ensures that all outstanding async SRCU work from
 * mmu_notifier_put() is completed. After it returns any mmu_notifier_ops
 * associated with an unused mmu_notifier will no longer be called.
 *
 * Before calling this, the caller must ensure that all of its mmu_notifiers
 * have been fully released via mmu_notifier_put().
 *
 * Modules using the mmu_notifier_put() API should call this in their __exit
 * function to avoid module unloading races.
 */
void mmu_notifier_synchronize(void)
{
	synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;
	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);