/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); mark the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()). In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force a flush
 *    of the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * The mmu_gather API also allows the architecture to provide its own
 * tlb_flush() implementation:
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provide the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size-specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If this option is set, the mmu_gather will not track individual pages for
 *  delayed page freeing. A platform that enables the option needs to provide
 *  its own implementation of the __tlb_remove_page_size() function to free
 *  pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 *
 * An illustrative usage sketch follows this comment.
 */
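
/*
 * Example (illustrative sketch only, not part of the API above): a caller
 * tearing down the user mappings of a single VMA would typically drive the
 * mmu_gather machinery roughly like this, assuming the
 * tlb_gather_mmu(tlb, mm, start, end) and tlb_finish_mmu(tlb, start, end)
 * prototypes used elsewhere in this kernel tree, and with the page-table
 * walk that yields ptep omitted:
 *
 *	struct mmu_gather tlb;
 *	unsigned long addr;
 *	pte_t pte;
 *
 *	tlb_gather_mmu(&tlb, vma->vm_mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	for (addr = start; addr < end; addr += PAGE_SIZE) {
 *		pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		if (pte_present(pte))
 *			tlb_remove_page(&tlb, pte_page(pte));
 *	}
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * The TLB invalidate and the actual page frees happen no earlier than
 * tlb_end_vma() / tlb_finish_mmu(), preserving the unhook -> invalidate ->
 * free ordering described above.
 */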

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
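
/*
 * Example (illustrative sketch): with MMU_GATHER_TABLE_FREE, an
 * architecture's page-directory free hooks hand the directory to
 * tlb_remove_table() rather than tlb_remove_page(). A hypothetical hook
 * for an architecture whose PTE pages are plain pages could be as simple
 * as:
 *
 *	#define __pte_free_tlb(tlb, pte, address)	\
 *		tlb_remove_table((tlb), (pte))
 *
 * Real architectures usually also run their page-table destructor here and
 * may pass a different cookie; whatever is passed is interpreted by that
 * architecture's __tlb_remove_table().
 */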

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the Linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif
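
/*
 * Example (illustrative sketch): with MMU_GATHER_NO_GATHER the architecture
 * supplies its own __tlb_remove_page_size() and typically frees the page
 * immediately instead of batching it, along these lines:
 *
 *	static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 *						  struct page *page,
 *						  int page_size)
 *	{
 *		free_page_and_swap_cache(page);
 *		return false;
 *	}
 *
 * Returning false means the caller never sees a "queue full" indication and
 * so never has to issue an extra tlb_flush_mmu() on the page's behalf.
 */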

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int		page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have an efficient means of flushing a range
 * of TLB entries, there is no point in doing intermediate flushes on
 * tlb_end_vma() to keep the range small. We equally don't have to worry about
 * page granularity or other things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation,
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
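
/*
 * Example (illustrative sketch): a caller tearing down a PMD-sized huge page
 * would normally switch the gather's page size first, so that architectures
 * selecting MMU_GATHER_PAGE_SIZE can pick the right invalidation granule.
 * Assuming transparent hugepages (HPAGE_PMD_SIZE defined):
 *
 *	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 *	...
 *	tlb_remove_pmd_tlb_entry(tlb, pmdp, addr);
 *	tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
 */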

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
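
/*
 * Example (illustrative sketch): an architecture whose range invalidation
 * takes a stride could use the unmap granule computed above in its
 * tlb_flush(), e.g.:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		unsigned long stride = tlb_get_unmap_size(tlb);
 *
 *		my_arch_flush_tlb_range(tlb->mm, tlb->start, tlb->end, stride);
 *	}
 *
 * where my_arch_flush_tlb_range() is a hypothetical arch-internal helper.
 */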

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->cleared_ptes = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		__tlb_adjust_range(tlb, address, _sz);		\
		if (_sz == PMD_SIZE)				\
			tlb->cleared_pmds = 1;			\
		else if (_sz == PUD_SIZE)			\
			tlb->cleared_puds = 1;			\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		tlb->cleared_pmds = 1;					\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
		tlb->cleared_puds = 1;					\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_pmds = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_puds = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_p4ds = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */