/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_MMU

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

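/*
 * Illustrative sketch, not part of this header: an architecture that selects
 * CONFIG_HAVE_RCU_TABLE_FREE is expected to hand freed page-table pages to
 * tlb_remove_table() from its page-table freeing hooks instead of freeing
 * them directly, for example (hypothetical hook; exactly what an
 * architecture passes and how it later frees it is arch-specific):
 *
 *	#define __pte_free_tlb(tlb, ptep, address)		\
 *		tlb_remove_table((tlb), (ptep))
 *
 * tlb_remove_table() collects tables in tlb->batch and frees the whole batch
 * only after a grace period during which IRQ-disabled walkers such as
 * gup_fast() can no longer be traversing them; if the batch page cannot be
 * allocated it falls back to freeing one table at a time, see
 * tlb_remove_table_one().
 */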
#endif

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
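
/*
 * Worked example (a 64-bit kernel with 4 KiB pages, assuming no structure
 * padding): sizeof(struct mmu_gather_batch) is 16 bytes, so MAX_GATHER_BATCH
 * is (4096 - 16) / 8 = 510 page pointers per batch and
 * MAX_GATHER_BATCH_COUNT is 10000 / 510 = 19 batches, i.e. roughly 9700
 * gathered pages before a flush is forced.
 */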

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
	int			page_size;
};

#define HAVE_GENERIC_MMU_GATHER

void arch_tlb_gather_mmu(struct mmu_gather *tlb,
	struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
	unsigned long start, unsigned long end, bool force);
void tlb_flush_mmu_free(struct mmu_gather *tlb);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
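
/*
 * Illustrative sketch of how the mm core drives a gather (the entry points
 * tlb_gather_mmu()/tlb_finish_mmu() live in the generic mm code, not in this
 * header; the loop below is pseudocode, not a real caller):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	for each present pte in [start, end) {
 *		ptent = ptep_get_and_clear_full(mm, addr, pte, tlb.fullmm);
 *		tlb_remove_tlb_entry(&tlb, pte, addr);
 *		if (__tlb_remove_page(&tlb, page))
 *			// batch full: flush with tlb_flush_mmu() and resume
 *	}
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * The TLB is invalidated for the accumulated [tlb->start, tlb->end) range
 * before any of the gathered pages are actually freed.
 */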

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}
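
/*
 * For instance, starting from a freshly reset (non-fullmm) gather where
 * start == TASK_SIZE and end == 0, the two calls
 *	__tlb_adjust_range(tlb, 0x1000, PAGE_SIZE);
 *	__tlb_adjust_range(tlb, 0x5000, PAGE_SIZE);
 * leave tlb->start == 0x1000 and tlb->end == 0x6000 (assuming 4 KiB pages),
 * i.e. the range grows to cover every address touched so far.
 */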

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

#ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
	/*
	 * We don't care about the page size change here; just update the
	 * mmu_gather page size so that the debug checks don't throw a
	 * false warning.
	 */
#ifdef CONFIG_DEBUG_VM
	tlb->page_size = page_size;
#endif
}
#endif

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
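
/*
 * Illustrative sketch: an architecture whose hardware can invalidate at a
 * chosen granule might use the unmap shift in its tlb_flush() to walk the
 * gathered range in entry-sized strides instead of always using PAGE_SIZE
 * (arch_invalidate_tlb_entry() is a hypothetical primitive, not something
 * this header defines):
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		unsigned long stride = tlb_get_unmap_size(tlb);
 *		unsigned long addr;
 *
 *		for (addr = tlb->start; addr < tlb->end; addr += stride)
 *			arch_invalidate_tlb_entry(addr);
 *	}
 */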

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm)				\
			tlb_flush_mmu_tlbonly(tlb);		\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif
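
/*
 * As an illustration (not taken from this header), an architecture with
 * virtually indexed caches might override the default no-op tlb_start_vma()
 * to write back the VMA's cache lines before the mappings are torn down:
 *
 *	#define tlb_start_vma(tlb, vma)					\
 *	do {								\
 *		if (!(tlb)->fullmm)					\
 *			flush_cache_range(vma, (vma)->vm_start,		\
 *					  (vma)->vm_end);		\
 *	} while (0)
 */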

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->cleared_ptes = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		__tlb_adjust_range(tlb, address, _sz);		\
		if (_sz == PMD_SIZE)				\
			tlb->cleared_pmds = 1;			\
		else if (_sz == PUD_SIZE)			\
			tlb->cleared_puds = 1;			\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation.
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		tlb->cleared_pmds = 1;					\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
		tlb->cleared_puds = 1;					\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others.
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE.
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_pmds = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_puds = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_p4ds = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif

#ifndef __ARCH_HAS_5LEVEL_HACK
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif

#endif /* CONFIG_MMU */

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */