/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>

struct mmu_notifier;
struct mmu_notifier_ops;

#ifdef CONFIG_MMU_NOTIFIER

/*
 * The mmu_notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and is released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
};

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented, you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model. So if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, which would lead to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM test-and-clears
	 * the young/accessed bitflag in the pte. This way the VM
	 * provides proper aging for accesses to the page through the
	 * secondary MMUs and not only for the ones through the Linux
	 * pte.
	 * Start-end is necessary in case the secondary MMU is mapping
	 * the page at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called when a pte mapping of a page is changed:
	 * for example, when ksm remaps a pte to point to a new shared page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount, but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 */
	void (*invalidate_range_start)(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long start, unsigned long end);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when
	 * an external TLB range needs to be flushed.
	 *
	 * The invalidate_range() function is called under the ptl
	 * spin-lock and is not allowed to sleep.
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);
};
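
/*
 * Illustrative sketch (editor's addition, not part of the kernel API):
 * a minimal mmu_notifier_ops for a hypothetical driver that mirrors CPU
 * page tables into a device TLB. All my_dev_* names are hypothetical
 * placeholders; only the callback signatures above come from this header.
 */
#if 0
static void my_dev_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* freeze the device MMU and drop every secondary mapping */
	my_dev_teardown_all_sptes(mn);
}

static void my_dev_invalidate_range_start(struct mmu_notifier *mn,
					  struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	/*
	 * Unmap the range from the device and block new sptes from
	 * being established until ..._range_end() runs, per the
	 * contract documented above.
	 */
	my_dev_block_and_unmap(mn, start, end);
}

static void my_dev_invalidate_range_end(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	/* allow the device to fault the range back in */
	my_dev_unblock(mn, start, end);
}

static const struct mmu_notifier_ops my_dev_mmu_notifier_ops = {
	.release		= my_dev_release,
	.invalidate_range_start	= my_dev_invalidate_range_start,
	.invalidate_range_end	= my_dev_invalidate_range_end,
};
#endif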

/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
};
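
/*
 * Illustrative sketch (editor's addition): struct mmu_notifier carries no
 * private data, so users conventionally embed it in a per-context object
 * and recover that object with container_of() inside the callbacks. The
 * my_dev_* names are hypothetical.
 */
#if 0
struct my_dev_context {
	struct mmu_notifier	mn;	/* embedded notifier */
	/* ... device page-table state ... */
};

static int my_dev_clear_flush_young(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct my_dev_context *ctx = container_of(mn, struct my_dev_context, mn);

	/*
	 * Test-and-clear the accessed bits in ctx's device page tables
	 * and return nonzero if any were set, mirroring the aging
	 * semantics described for clear_flush_young above.
	 */
	return my_dev_age_range(ctx, start, end);
}
#endif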

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);
extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					       struct mm_struct *mm);
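
/*
 * Illustrative sketch (editor's addition) of the registration calls
 * declared above, under these assumptions: mmu_notifier_register() may
 * sleep and takes the locks it needs itself, while __mmu_notifier_register()
 * is for callers already holding mmap_sem. Names other than the
 * register/unregister functions are hypothetical.
 */
#if 0
static int my_dev_bind(struct my_dev_context *ctx, struct mm_struct *mm)
{
	int ret;

	ctx->mn.ops = &my_dev_mmu_notifier_ops;

	ret = mmu_notifier_register(&ctx->mn, mm);	/* may sleep */
	if (ret)
		return ret;

	/* from here on, the callbacks may fire at any time */
	return 0;
}

static void my_dev_unbind(struct my_dev_context *ctx, struct mm_struct *mm)
{
	/*
	 * Tears down the registration; the _no_release variant above
	 * exists for callers that do not want ->release invoked.
	 */
	mmu_notifier_unregister(&ctx->mn, mm);
}
#endif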
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end);

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_start(mm, start, end);
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})
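
/*
 * Illustrative sketch (editor's addition): the _notify aging macros are
 * meant for rmap-style walkers that already hold the pte lock. A page
 * counts as referenced if either the Linux pte or any secondary-MMU
 * mapping was young. The surrounding walker context is hypothetical.
 */
#if 0
	/* inside a pte walk, with vma, addr and ptep valid and ptl held */
	int referenced = 0;

	if (ptep_clear_flush_young_notify(vma, addr, ptep))
		referenced++;	/* young in the primary or a secondary MMU */
#endif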

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * Updating the secondary MMUs first is safe, because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush()
 * before set_pte_at_notify() is invoked. Updating the secondary MMUs
 * first is required when we change both the protection of the mapping
 * from read-only to read-write and the pfn (like during copy on write
 * page faults). Otherwise the old page would remain mapped readonly in
 * the secondary MMUs after the new page is already writable by some
 * CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
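
/*
 * Illustrative sketch (editor's addition) of the ordering the comment
 * above describes, modeled loosely on a write-protect fault handler.
 * This is a simplified sequence, not the actual mm/memory.c code; the
 * new page and its pte value "entry" are assumed to be set up earlier.
 */
#if 0
	/* 1. invalidate the primary MMU pte and flush the CPU TLB */
	ptep_clear_flush(vma, address, ptep);

	/*
	 * 2. set_pte_at_notify() first repoints the secondary MMUs at
	 *    the new page via ->change_pte, then installs the primary
	 *    pte, so no CPU can write the new page while a secondary
	 *    MMU still maps the old one read-only.
	 */
	set_pte_at_notify(mm, address, ptep, entry);
#endif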

extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
				   void (*func)(struct rcu_head *rcu));
extern void mmu_notifier_synchronize(void);

#else /* CONFIG_MMU_NOTIFIER */

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */