/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (for many reasons, e.g.
 * madvise() or replacing a page with another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, i.e. using the vma access permissions (vm_page_prot) to update the
 * whole range is enough; there is no need to inspect changes to the CPU page
 * table (mprotect() syscall)
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
 * flags for pages in the range, so to mirror those changes the user must
 * inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
 * same access flags). The user should soft dirty the page in the end callback
 * to make sure that anyone relying on soft dirtiness catches pages that might
 * be written through non-CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 *
 * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
 * that a device driver may ignore the invalidation if the
 * migrate_pgmap_owner field matches the driver's device private pgmap owner.
 */
enum mmu_notifier_event {
        MMU_NOTIFY_UNMAP = 0,
        MMU_NOTIFY_CLEAR,
        MMU_NOTIFY_PROTECTION_VMA,
        MMU_NOTIFY_PROTECTION_PAGE,
        MMU_NOTIFY_SOFT_DIRTY,
        MMU_NOTIFY_RELEASE,
        MMU_NOTIFY_MIGRATE,
};

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
        /*
         * Called either by mmu_notifier_unregister or when the mm is
         * being destroyed by exit_mmap, always before all pages are
         * freed. This can run concurrently with other mmu notifier
         * methods (the ones invoked outside the mm context) and it
         * should tear down all secondary mmu mappings and freeze the
         * secondary mmu. If this method isn't implemented you have to
         * be sure that nothing could possibly write to the pages
         * through the secondary mmu by the time the last thread with
         * tsk->mm == mm exits.
         *
         * As a side note: the pages freed after ->release returns could
         * be immediately reallocated by the gart at an alias physical
         * address with a different cache model, so if ->release isn't
         * implemented because all _software_ driven memory accesses
         * through the secondary mmu are terminated by the time the
         * last thread of this mm quits, you also have to be sure that
         * speculative _hardware_ operations can't allocate dirty
         * cachelines in the cpu that could not be snooped and made
         * coherent with the other read and write operations happening
         * through the gart alias address, thus leading to memory
         * corruption.
         */
        void (*release)(struct mmu_notifier *subscription,
                        struct mm_struct *mm);

        /*
         * clear_flush_young is called after the VM test-and-clears
         * the young/accessed bitflag in the pte. This way the VM will
         * provide proper aging for accesses to the page through the
         * secondary MMUs and not only for those through the Linux pte.
         * Start-end is necessary in case the secondary MMU is mapping the page
         * at a smaller granularity than the primary MMU.
         */
        int (*clear_flush_young)(struct mmu_notifier *subscription,
                                 struct mm_struct *mm,
                                 unsigned long start,
                                 unsigned long end);

        /*
         * clear_young is a lightweight version of clear_flush_young. Like the
         * latter, it is supposed to test-and-clear the young/accessed bitflag
         * in the secondary pte, but it may omit flushing the secondary tlb.
         */
        int (*clear_young)(struct mmu_notifier *subscription,
                           struct mm_struct *mm,
                           unsigned long start,
                           unsigned long end);

        /*
         * test_young is called to check the young/accessed bitflag in
         * the secondary pte. This is used to know if the page is
         * frequently used without actually clearing the flag or tearing
         * down the secondary mapping on the page.
         */
        int (*test_young)(struct mmu_notifier *subscription,
                          struct mm_struct *mm,
                          unsigned long address);

        /*
         * change_pte is called in cases where the pte mapping a page is
         * changed: for example, when ksm remaps the pte to point to a new
         * shared page.
         */
        void (*change_pte)(struct mmu_notifier *subscription,
                           struct mm_struct *mm,
                           unsigned long address,
                           pte_t pte);

        /*
         * invalidate_range_start() and invalidate_range_end() must be
         * paired and are called only when the mmap_lock and/or the
         * locks protecting the reverse maps are held. If the subsystem
         * can't guarantee that no additional references are taken to
         * the pages in the range, it has to implement the
         * invalidate_range() notifier to remove any references taken
         * after invalidate_range_start().
         *
         * Invalidation of multiple concurrent ranges may be
         * optionally permitted by the driver. Either way the
         * establishment of sptes is forbidden in the range passed to
         * invalidate_range_start/end for the whole duration of the
         * invalidate_range_start/end critical section.
         *
         * invalidate_range_start() is called when all pages in the
         * range are still mapped and have at least a refcount of one.
         *
         * invalidate_range_end() is called when all pages in the
         * range have been unmapped and the pages have been freed by
         * the VM.
         *
         * The VM will remove the page table entries and potentially
         * the page between invalidate_range_start() and
         * invalidate_range_end(). If the page must not be freed
         * because of pending I/O or other circumstances then the
         * invalidate_range_start() callback (or the initial mapping
         * by the driver) must make sure that the refcount is kept
         * elevated.
         *
         * If the driver increases the refcount when the pages are
         * initially mapped into an address space then either
         * invalidate_range_start() or invalidate_range_end() may
         * decrease the refcount. If the refcount is decreased on
         * invalidate_range_start() then the VM can free pages as page
         * table entries are removed. If the refcount is only
         * dropped on invalidate_range_end() then the driver itself
         * will drop the last refcount but it must take care to flush
         * any secondary tlb before doing the final free on the
         * page. Pages will no longer be referenced by the Linux
         * address space but may still be referenced by sptes until
         * the last refcount is dropped.
         *
         * If the blockable argument is set to false then the callback cannot
         * sleep and has to return -EAGAIN if sleeping would be required;
         * 0 should be returned otherwise. Please note that notifiers that can
         * fail invalidate_range_start are not allowed to implement
         * invalidate_range_end, as there is no mechanism for informing the
         * notifier that its start failed. (See the example sketch after this
         * structure.)
         */
        int (*invalidate_range_start)(struct mmu_notifier *subscription,
                                      const struct mmu_notifier_range *range);
        void (*invalidate_range_end)(struct mmu_notifier *subscription,
                                     const struct mmu_notifier_range *range);

        /*
         * invalidate_range() is either called between
         * invalidate_range_start() and invalidate_range_end() when the
         * VM has to free pages that were unmapped, but before the
         * pages are actually freed, or outside of _start()/_end() when
         * a (remote) TLB flush is necessary.
         *
         * If invalidate_range() is used to manage a non-CPU TLB with
         * shared page-tables, it is not necessary to implement the
         * invalidate_range_start()/end() notifiers, as
         * invalidate_range() already catches the points in time when an
         * external TLB range needs to be flushed. For a more in-depth
         * discussion on this see Documentation/vm/mmu_notifier.rst
         *
         * Note that this function might be called with just a sub-range
         * of what was passed to invalidate_range_start()/end(), if
         * called between those functions.
         */
        void (*invalidate_range)(struct mmu_notifier *subscription,
                                 struct mm_struct *mm,
                                 unsigned long start,
                                 unsigned long end);

        /*
         * These callbacks are used with the get/put interface to manage the
         * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
         * notifier for use with the mm.
         *
         * free_notifier() is only called after the mmu_notifier has been
         * fully put, calls to any ops callback are prevented and no ops
         * callbacks are currently running. It is called from a SRCU callback
         * and cannot sleep.
         */
        struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
        void (*free_notifier)(struct mmu_notifier *subscription);
};
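
/*
 * Example: a minimal sketch (not taken from any in-tree driver; all
 * "example_*" names, the dev->lock mutex and example_dev_unmap() are
 * hypothetical) of how a secondary-MMU user might implement the start/end
 * pair above and register itself. The callback honours
 * mmu_notifier_range_blockable() so it may be invoked from non-blocking
 * context:
 *
 *	static int example_invalidate_range_start(struct mmu_notifier *sub,
 *				const struct mmu_notifier_range *range)
 *	{
 *		struct example_dev *dev =
 *			container_of(sub, struct example_dev, notifier);
 *
 *		// If we may sleep, wait for the lock; otherwise only try it
 *		// and ask the caller to retry in a blockable context.
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&dev->lock);
 *		else if (!mutex_trylock(&dev->lock))
 *			return -EAGAIN;
 *
 *		// Shoot down device mappings covering [start, end).
 *		example_dev_unmap(dev, range->start, range->end);
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 *
 *	static const struct mmu_notifier_ops example_ops = {
 *		// Since start can fail, no invalidate_range_end is provided.
 *		.invalidate_range_start = example_invalidate_range_start,
 *	};
 *
 *	// Registration takes mmap_write_lock(mm) internally.
 *	int example_attach(struct example_dev *dev, struct mm_struct *mm)
 *	{
 *		dev->notifier.ops = &example_ops;
 *		return mmu_notifier_register(&dev->notifier, mm);
 *	}
 */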

/*
 * The notifier chains are protected by the mmap_lock and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_lock are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_lock is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
        struct hlist_node hlist;
        const struct mmu_notifier_ops *ops;
        struct mm_struct *mm;
        struct rcu_head rcu;
        unsigned int users;
};

/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
        bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
                           const struct mmu_notifier_range *range,
                           unsigned long cur_seq);
};

struct mmu_interval_notifier {
        struct interval_tree_node interval_tree;
        const struct mmu_interval_notifier_ops *ops;
        struct mm_struct *mm;
        struct hlist_node deferred_item;
        unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        unsigned flags;
        enum mmu_notifier_event event;
        void *migrate_pgmap_owner;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
        return unlikely(mm->notifier_subscriptions);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
                                             struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
        struct mmu_notifier *ret;

        mmap_write_lock(mm);
        ret = mmu_notifier_get_locked(ops, mm);
        mmap_write_unlock(mm);
        return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);
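
/*
 * Example: a minimal sketch of the get/put lifetime model (the "example_*"
 * names and struct example_ctx are hypothetical). The ops must provide
 * alloc_notifier()/free_notifier(); mmu_notifier_get() then either returns
 * the notifier already registered with these ops on the mm, with a user
 * reference added, or allocates and registers a new one, and
 * mmu_notifier_put() drops that reference again:
 *
 *	static struct mmu_notifier *example_alloc_notifier(struct mm_struct *mm)
 *	{
 *		struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return ERR_PTR(-ENOMEM);
 *		return &ctx->notifier;
 *	}
 *
 *	static void example_free_notifier(struct mmu_notifier *sub)
 *	{
 *		// Called from an SRCU callback, must not sleep.
 *		kfree(container_of(sub, struct example_ctx, notifier));
 *	}
 *
 *	static const struct mmu_notifier_ops example_ops = {
 *		.alloc_notifier = example_alloc_notifier,
 *		.free_notifier = example_free_notifier,
 *	};
 *
 *	int example_get(void)
 *	{
 *		struct mmu_notifier *sub;
 *
 *		sub = mmu_notifier_get(&example_ops, current->mm);
 *		if (IS_ERR(sub))
 *			return PTR_ERR(sub);
 *		// ... use the mm with the notifier registered ...
 *		mmu_notifier_put(sub);
 *		return 0;
 *	}
 */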

extern int mmu_notifier_register(struct mmu_notifier *subscription,
                                 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
                                   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
                                    struct mm_struct *mm);

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long length,
                                 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
        struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
        unsigned long start, unsigned long length,
        const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub - The subscription passed to invalidate
 * @cur_seq - The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
                     unsigned long cur_seq)
{
        WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}

/**
 * mmu_interval_read_retry - End a read side critical section against a VA range
 * interval_sub: The subscription
 * seq: The return of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
                        unsigned long seq)
{
        return interval_sub->invalidate_seq != seq;
}

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * interval_sub: The subscription
 * seq: The return of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry(). A return of true indicates an invalidation
 * has collided with this critical region and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
                         unsigned long seq)
{
        /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
        return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
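
/*
 * Example: a sketch of the collision-retry pattern the three helpers above
 * are designed for (the "example_*" names, struct example_mirror and its
 * m->lock mutex are hypothetical; mm/mmu_notifier.c documents the same
 * scheme). The notifier is registered with mmu_interval_notifier_insert(),
 * and the same driver lock must be held both around
 * mmu_interval_read_retry() and inside ops->invalidate() when it calls
 * mmu_interval_set_seq():
 *
 *	static bool example_invalidate(struct mmu_interval_notifier *sub,
 *				       const struct mmu_notifier_range *range,
 *				       unsigned long cur_seq)
 *	{
 *		struct example_mirror *m =
 *			container_of(sub, struct example_mirror, notifier);
 *
 *		// We need m->lock; only sleep for it when that is allowed.
 *		if (mmu_notifier_range_blockable(range))
 *			mutex_lock(&m->lock);
 *		else if (!mutex_trylock(&m->lock))
 *			return false;
 *
 *		mmu_interval_set_seq(sub, cur_seq);
 *		// Tear down device PTEs covering [range->start, range->end).
 *		example_mirror_unmap(m, range->start, range->end);
 *		mutex_unlock(&m->lock);
 *		return true;
 *	}
 *
 *	// Mapping side:
 *	unsigned long seq;
 *
 *again:
 *	seq = mmu_interval_read_begin(&m->notifier);
 *	// Fault in / look up the CPU pages for the range here, without
 *	// holding m->lock.
 *	mutex_lock(&m->lock);
 *	if (mmu_interval_read_retry(&m->notifier, seq)) {
 *		mutex_unlock(&m->lock);
 *		goto again;
 *	}
 *	// Program the device page tables, still under m->lock.
 *	mutex_unlock(&m->lock);
 */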

extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
                                      unsigned long start,
                                      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
                                     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
                                      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
                                                bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
                                            unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
        return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end)
{
        if (mm_has_notifiers(mm))
                return __mmu_notifier_clear_flush_young(mm, start, end);
        return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        if (mm_has_notifiers(mm))
                return __mmu_notifier_clear_young(mm, start, end);
        return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
                                          unsigned long address)
{
        if (mm_has_notifiers(mm))
                return __mmu_notifier_test_young(mm, address);
        return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
                                           unsigned long address, pte_t pte)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
        might_sleep();

        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        if (mm_has_notifiers(range->mm)) {
                range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
                __mmu_notifier_invalidate_range_start(range);
        }
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
        int ret = 0;

        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        if (mm_has_notifiers(range->mm)) {
                range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
                ret = __mmu_notifier_invalidate_range_start(range);
        }
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
        return ret;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
        if (mmu_notifier_range_blockable(range))
                might_sleep();

        if (mm_has_notifiers(range->mm))
                __mmu_notifier_invalidate_range_end(range, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
        if (mm_has_notifiers(range->mm))
                __mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
                                                 unsigned long start, unsigned long end)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
        mm->notifier_subscriptions = NULL;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
        if (mm_has_notifiers(mm))
                __mmu_notifier_subscriptions_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
                                           enum mmu_notifier_event event,
                                           unsigned flags,
                                           struct vm_area_struct *vma,
                                           struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        range->vma = vma;
        range->event = event;
        range->mm = mm;
        range->start = start;
        range->end = end;
        range->flags = flags;
}

static inline void mmu_notifier_range_init_migrate(
                        struct mmu_notifier_range *range, unsigned int flags,
                        struct vm_area_struct *vma, struct mm_struct *mm,
                        unsigned long start, unsigned long end, void *pgmap)
{
        mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm,
                                start, end);
        range->migrate_pgmap_owner = pgmap;
}
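
/*
 * Example: migrate_vma_collect() uses the helper above to stamp the range
 * with the pgmap_owner from struct migrate_vma. A sketch (hypothetical
 * "example_*" names and owner pointer) of how a device driver's interval
 * notifier callback might use that to skip invalidations caused by its own
 * migration, which it handles itself as part of the migrate sequence:
 *
 *	static bool example_migrate_aware_invalidate(
 *				struct mmu_interval_notifier *sub,
 *				const struct mmu_notifier_range *range,
 *				unsigned long cur_seq)
 *	{
 *		// Invalidations triggered by our own migrate_vma_setup()
 *		// carry the owner we passed in migrate_vma.pgmap_owner.
 *		if (range->event == MMU_NOTIFY_MIGRATE &&
 *		    range->migrate_pgmap_owner == example_drv_owner)
 *			return true;
 *
 *		// ... otherwise invalidate the device mappings as usual ...
 *		return true;
 *	}
 */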

#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
({ \
        int __young; \
        struct vm_area_struct *___vma = __vma; \
        unsigned long ___address = __address; \
        __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
        __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
                                                  ___address, \
                                                  ___address + \
                                                        PAGE_SIZE); \
        __young; \
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp) \
({ \
        int __young; \
        struct vm_area_struct *___vma = __vma; \
        unsigned long ___address = __address; \
        __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \
        __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
                                                  ___address, \
                                                  ___address + \
                                                        PMD_SIZE); \
        __young; \
})

#define ptep_clear_young_notify(__vma, __address, __ptep) \
({ \
        int __young; \
        struct vm_area_struct *___vma = __vma; \
        unsigned long ___address = __address; \
        __young = ptep_test_and_clear_young(___vma, ___address, __ptep); \
        __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
                                            ___address + PAGE_SIZE); \
        __young; \
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp) \
({ \
        int __young; \
        struct vm_area_struct *___vma = __vma; \
        unsigned long ___address = __address; \
        __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp); \
        __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
                                            ___address + PMD_SIZE); \
        __young; \
})

#define ptep_clear_flush_notify(__vma, __address, __ptep) \
({ \
        unsigned long ___addr = __address & PAGE_MASK; \
        struct mm_struct *___mm = (__vma)->vm_mm; \
        pte_t ___pte; \
 \
        ___pte = ptep_clear_flush(__vma, __address, __ptep); \
        mmu_notifier_invalidate_range(___mm, ___addr, \
                                      ___addr + PAGE_SIZE); \
 \
        ___pte; \
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
({ \
        unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
        struct mm_struct *___mm = (__vma)->vm_mm; \
        pmd_t ___pmd; \
 \
        ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
        mmu_notifier_invalidate_range(___mm, ___haddr, \
                                      ___haddr + HPAGE_PMD_SIZE); \
 \
        ___pmd; \
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \
({ \
        unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \
        struct mm_struct *___mm = (__vma)->vm_mm; \
        pud_t ___pud; \
 \
        ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \
        mmu_notifier_invalidate_range(___mm, ___haddr, \
                                      ___haddr + HPAGE_PUD_SIZE); \
 \
        ___pud; \
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to start by updating the secondary MMUs, because the primary
 * MMU pte invalidation must already have happened with a ptep_clear_flush()
 * before set_pte_at_notify() is invoked. Updating the secondary MMUs first
 * is required when we change both the protection of the mapping from
 * read-only to read-write and the pfn (like during copy on write page
 * faults). Otherwise the old page would remain mapped read-only in the
 * secondary MMUs after the new page is already writable by some CPU through
 * the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte) \
({ \
        struct mm_struct *___mm = __mm; \
        unsigned long ___address = __address; \
        pte_t ___pte = __pte; \
 \
        mmu_notifier_change_pte(___mm, ___address, ___pte); \
        set_pte_at(___mm, ___address, __ptep, ___pte); \
})

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
        unsigned long start;
        unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
                                            unsigned long start,
                                            unsigned long end)
{
        range->start = start;
        range->end = end;
}

#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
        _mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \
                                        pgmap) \
        _mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
        return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
        return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end)
{
        return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
                                          unsigned long address)
{
        return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
                                           unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
        return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
                                                 unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */