/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

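/*
 * Example (illustrative only): an NMI-context consumer, such as a profiler
 * sampling user stacks, is expected to check nmi_uaccess_okay() before it
 * touches user memory.  copy_from_user_nmi() below is x86's helper and is
 * only used here to sketch the idea:
 *
 *	static unsigned long sample_user_word(const void __user *addr)
 *	{
 *		unsigned long word = 0;
 *
 *		if (nmi_uaccess_okay())
 *			copy_from_user_nmi(&word, addr, sizeof(word));
 *
 *		return word;
 *	}
 */
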
#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of the following (a usage sketch follows this
 * comment block):
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  HAVE_MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()). This provides separate freeing of
 *  the page-table pages themselves in a semi-RCU fashion (see comment below).
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 */
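
/*
 * Putting it together, a tear-down path is expected to look roughly like the
 * sketch below.  This is illustrative only: the real callers live in
 * mm/memory.c and mm/mmap.c, and the authoritative tlb_gather_mmu() /
 * tlb_finish_mmu() prototypes are the ones declared by the mm code, not this
 * comment.
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	for each vma overlapping [start, end) {
 *		tlb_start_vma(&tlb, vma);
 *		... zap the page-table entries, queueing pages; see
 *		    tlb_remove_tlb_entry() and tlb_remove_page() below ...
 *		tlb_end_vma(&tlb, vma);
 *	}
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * This preserves the unhook -> invalidate -> free ordering described above,
 * even when the page queue fills up and tlb_flush_mmu() runs early.
 */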

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 *
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

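/*
 * A minimal sketch of how an architecture selecting
 * CONFIG_MMU_GATHER_RCU_TABLE_FREE is expected to wire this up.  The
 * arch-side definitions below are illustrative only; each architecture
 * provides its own in <asm/tlb.h> / <asm/pgalloc.h>:
 *
 *	#define __pte_free_tlb(tlb, pte, addr)	tlb_remove_table((tlb), (pte))
 *
 *	void __tlb_remove_table(void *table)
 *	{
 *		... the actual free of the page-table page, e.g. ...
 *		pgtable_pte_page_dtor(table);
 *		__free_page(table);
 *	}
 *
 * tlb_remove_table() batches the table pages and only frees them, via
 * __tlb_remove_table(), once the semi-RCU scheme above says it is safe.
 */
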
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

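/*
 * For a sense of scale, assuming 4 KiB pages and 8-byte pointers:
 * sizeof(struct mmu_gather_batch) is 16 bytes, so MAX_GATHER_BATCH is
 * (4096 - 16) / 8 = 510 pages per batch and MAX_GATHER_BATCH_COUNT is
 * 10000 / 510 = 19 batches; roughly 9700 pages (~38 MiB of 4 KiB pages)
 * can thus be queued before the gather forces a flush.
 */
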
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	unsigned int		page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

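/*
 * For example (4 KiB PAGE_SIZE assumed): after recording one PTE at 0x1000
 * and another at 0x5000, tlb->start is 0x1000 and tlb->end is 0x6000, so a
 * single range flush covers both entries, including the hole between them.
 */
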
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation,
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}

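/*
 * An architecture that overrides tlb_flush() is expected to combine the
 * fields above.  A hypothetical stride-based implementation could look like
 * the sketch below; it is purely illustrative and
 * __flush_tlb_range_stride() is a made-up helper, not a real kernel API:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->need_flush_all) {
 *			flush_tlb_mm(tlb->mm);
 *			return;
 *		}
 *
 *		if (tlb->end)
 *			__flush_tlb_range_stride(tlb->mm, tlb->start, tlb->end,
 *						 tlb_get_unmap_size(tlb),
 *						 tlb->freed_tables);
 *	}
 */
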
/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->cleared_ptes = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

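/*
 * A condensed sketch of how the PTE zap loop uses this; illustrative only,
 * see zap_pte_range() in mm/memory.c for the real thing:
 *
 *	pte_t ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	struct page *page = vm_normal_page(vma, addr, ptent);
 *
 *	tlb_remove_tlb_entry(tlb, pte, addr);
 *	if (__tlb_remove_page(tlb, page)) {
 *		... the queue is full: break out of the loop and let the
 *		    caller run tlb_flush_mmu() before continuing ...
 *	}
 */
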
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		__tlb_adjust_range(tlb, address, _sz);		\
		if (_sz == PMD_SIZE)				\
			tlb->cleared_pmds = 1;			\
		else if (_sz == PUD_SIZE)			\
			tlb->cleared_puds = 1;			\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		tlb->cleared_pmds = 1;					\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
		tlb->cleared_puds = 1;					\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_pmds = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

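/*
 * Architectures that do not select CONFIG_MMU_GATHER_RCU_TABLE_FREE
 * typically route the freed table page through the normal page queue; a
 * representative (illustrative only) __pte_free_tlb() looks like:
 *
 *	#define __pte_free_tlb(tlb, pte, addr)		\
 *	do {						\
 *		pgtable_pte_page_dtor(pte);		\
 *		tlb_remove_page((tlb), (pte));		\
 *	} while (0)
 *
 * Either way the table page is only freed after the TLB (and, because
 * tlb->freed_tables is set, any table-walk caches) has been invalidated.
 */
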
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_puds = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_p4ds = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */