// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function allows mmu_notifier::release callback to delay a call to
 * a function that will free appropriate resources. The function must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

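/*
 * Lightweight variant of __mmu_notifier_clear_flush_young(): test and clear
 * the young/accessed flag in the secondary ptes for [start, end) without
 * flushing the secondary TLBs.
 */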
int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

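/*
 * Check whether any secondary MMU has accessed @address since the flag was
 * last cleared, without clearing it; stops at the first notifier that
 * reports the page as young.
 */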
int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

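/*
 * The pte at @address was atomically replaced (see set_pte_at_notify()),
 * e.g. by KSM or COW; let secondary MMUs update their mapping to the new
 * page instead of just invalidating it.
 */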
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

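/*
 * Notify all registered notifiers that the pages in @range are about to be
 * invalidated. Errors from the callbacks (expected only for non-blockable
 * ranges) are logged and propagated back to the caller.
 */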
int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start) {
			int _ret = mn->ops->invalidate_range_start(mn, range);

			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					mn->ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ? "non-" : "");
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

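/*
 * Close an invalidation started by __mmu_notifier_invalidate_range_start().
 * @only_end is set by callers that already issued the invalidate_range()
 * callback under the page table lock, so it is not repeated here.
 */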
void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range here too, to avoid the need for the
		 * subsystem to register an invalidate_range_end call-back
		 * when it already has invalidate_range. Usually a subsystem
		 * registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this adds no overhead besides the
		 * pointer check.
		 *
		 * We skip the call to invalidate_range() if we know it is
		 * safe, i.e. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is only
		 * done when a call to invalidate_range() already happened
		 * under the page table lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, range->mm,
						  range->start,
						  range->end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, range);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);

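/*
 * Called when the primary TLB entries covering [start, end) are flushed, so
 * that secondary TLBs sharing the CPU page tables (e.g. through an IOMMU)
 * can be flushed as well and stay coherent.
 */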
void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct mmu_notifier_mm *mmu_notifier_mm = NULL;
	int ret;

	lockdep_assert_held_write(&mm->mmap_sem);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	if (!mm->mmu_notifier_mm) {
		/*
		 * kmalloc cannot be called under mm_take_all_locks(), but we
		 * know that mm->mmu_notifier_mm can't change while we hold
		 * the write side of the mmap_sem.
		 */
		mmu_notifier_mm =
			kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
		if (!mmu_notifier_mm)
			return -ENOMEM;

		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
	}

	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	/* Pairs with the mmdrop in mmu_notifier_unregister_* */
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	if (mmu_notifier_mm)
		mm->mmu_notifier_mm = mmu_notifier_mm;

	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return 0;

out_clean:
	kfree(mmu_notifier_mm);
	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = __mmu_notifier_register(mn, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

/*
 * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Can not use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);

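/*
 * Return true when the invalidation is due to a vma protection change that
 * leaves the range readable, so secondary mappings can be downgraded to
 * read-only instead of being torn down.
 */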
bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;
	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);