/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>

struct mmu_notifier;
struct mmu_notifier_ops;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
 * madvise() or replacing a page by another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, i.e. using the vma access permission (vm_page_prot) to update the
 * whole range is enough; there is no need to inspect changes to the CPU page
 * table (mprotect() syscall)
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
 * flag for pages in the range, so to mirror those changes the user must
 * inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
 * access flags). User should soft dirty the page in the end callback to make
 * sure that anyone relying on soft dirtiness catches pages that might be
 * written through non-CPU mappings.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
};

#ifdef CONFIG_MMU_NOTIFIER

/*
 * The mmu_notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
};

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_range {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
};

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, thus leading to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM test-and-clears
	 * the young/accessed bitflag in the pte. This way the VM will
	 * provide proper aging to the accesses to the page through the
	 * secondary MMUs and not only to the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called in cases where the pte mapping a page is
	 * changed: for example, when ksm remaps the pte to point to a new
	 * shared page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the MMU_NOTIFIER_RANGE_BLOCKABLE flag is not set in the
	 * range (see mmu_notifier_range_blockable()) then the callback
	 * cannot sleep and has to return -EAGAIN; 0 should be returned
	 * otherwise. Please note that if invalidate_range_start approves
	 * a non-blocking behavior then the same applies to
	 * invalidate_range_end.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For a more in-depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);
};
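
/*
 * Illustrative sketch only, not part of this header: a driver that mirrors
 * CPU page tables into a device MMU typically implements at least
 * invalidate_range_start()/end(). The struct and function names below
 * (my_mirror, my_mirror_flush, ...) are hypothetical. If tearing down the
 * device mappings might sleep, a non-blockable invalidation must be refused
 * with -EAGAIN:
 *
 *	static int my_invalidate_range_start(struct mmu_notifier *mn,
 *				const struct mmu_notifier_range *range)
 *	{
 *		struct my_mirror *mirror =
 *			container_of(mn, struct my_mirror, mn);
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return -EAGAIN;
 *		my_mirror_flush(mirror, range->start, range->end);
 *		return 0;
 *	}
 *
 * The paired invalidate_range_end() callback would then allow device faults
 * on the range to be serviced again.
 */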

/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
};
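
/*
 * Illustrative sketch only: users are expected to embed a struct mmu_notifier
 * in their own per-mm state and to recover it in the callbacks with
 * container_of(). The structure, field and callback names below are
 * hypothetical.
 *
 *	struct my_mirror {
 *		struct mmu_notifier	mn;
 *		struct my_device	*dev;
 *	};
 *
 *	static const struct mmu_notifier_ops my_mirror_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *		.invalidate_range_end	= my_invalidate_range_end,
 *	};
 */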

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);
extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					       struct mm_struct *mm);
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
						bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
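
/*
 * Illustrative sketch only: registration attaches the notifier to an mm, and
 * the callbacks can start firing as soon as mmu_notifier_register() returns,
 * so the ops must be fully set up first. mmu_notifier_register() takes
 * mmap_sem internally; __mmu_notifier_register() is the variant for callers
 * that already hold it. The helper names below are hypothetical.
 *
 *	static int my_mirror_attach(struct my_mirror *mirror,
 *				    struct mm_struct *mm)
 *	{
 *		mirror->mn.ops = &my_mirror_ops;
 *		return mmu_notifier_register(&mirror->mn, mm);
 *	}
 *
 *	static void my_mirror_detach(struct my_mirror *mirror,
 *				     struct mm_struct *mm)
 *	{
 *		mmu_notifier_unregister(&mirror->mn, mm);
 *	}
 */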

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		return __mmu_notifier_invalidate_range_start(range);
	}
	return 0;
}
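
/*
 * Illustrative sketch only: the _nonblock variant is for contexts that must
 * not sleep. If any registered notifier refuses the invalidation the call
 * returns non-zero and the caller has to back off rather than wait, e.g.:
 *
 *	if (mmu_notifier_invalidate_range_start_nonblock(&range))
 *		return false;
 */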

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned flags,
					   struct vm_area_struct *vma,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = 0;
}
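
/*
 * Illustrative sketch only: the usual pattern on the mm side is to describe
 * the affected range, bracket the page table update with start/end, and only
 * then free the pages. The event, flags and surrounding variables below are
 * just an example.
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	... clear the CPU page table entries for [start, end) ...
 *	mmu_notifier_invalidate_range_end(&range);
 */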

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to start by updating the secondary MMUs, because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush() before
 * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is
 * required when we change both the protection of the mapping from read-only
 * to read-write and the pfn (like during copy-on-write page faults).
 * Otherwise the old page would remain mapped read-only in the secondary MMUs
 * after the new page is already writable by some CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})

extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
				   void (*func)(struct rcu_head *rcu));

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */