/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)	asm ("tlbi " #op "\n"			\
		   ALTERNATIVE("nop\n			nop",		\
			       "dsb ish\n		tlbi " #op,	\
			       ARM64_WORKAROUND_REPEAT_TLBI,		\
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	\
			    : : )

#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0\n"		\
		   ALTERNATIVE("nop\n			nop",		\
			       "dsb ish\n		tlbi " #op ", %0", \
			       ARM64_WORKAROUND_REPEAT_TLBI,		\
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	\
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

#define __tlbi_user(op, arg) do {					\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi(op, (arg) | USER_ASID_FLAG);			\
} while (0)

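/*
 * For example (illustrative only), an operation without an address
 * argument and one taking an address argument look like:
 *
 *	__tlbi(vmalle1is);		// invalidate all stage-1 entries
 *	__tlbi(vale1is, addr);		// invalidate one leaf entry by VA+ASID
 *
 * where 'addr' has been formatted with the __TLBI_VADDR() macro below.
 */
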
/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
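
/*
 * As a worked example (illustrative values only): for user address
 * 0x00007f1234567000 and ASID 42, the macro drops the low 12 bits of
 * the address and packs the ASID (0x2a) into bits [63:48]:
 *
 *	__TLBI_VADDR(0x00007f1234567000UL, 42) == 0x002a0007f1234567
 */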

/*
 * Level-based TLBI operations.
 *
 * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
 * the level at which the invalidation must take place. If the level is
 * wrong, no invalidation may take place. In the case where the level
 * cannot be easily determined, a 0 value for the level parameter will
 * perform a non-hinted invalidation.
 *
 * For Stage-2 invalidation, use the level values provided to that effect
 * in asm/stage2_pgtable.h.
 */
#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)
#define TLBI_TTL_TG_4K		1
#define TLBI_TTL_TG_16K		2
#define TLBI_TTL_TG_64K		3

#define __tlbi_level(op, addr, level)					\
	do {								\
		u64 arg = addr;						\
									\
		if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&	\
		    level) {						\
			u64 ttl = level & 3;				\
									\
			switch (PAGE_SIZE) {				\
			case SZ_4K:					\
				ttl |= TLBI_TTL_TG_4K << 2;		\
				break;					\
			case SZ_16K:					\
				ttl |= TLBI_TTL_TG_16K << 2;		\
				break;					\
			case SZ_64K:					\
				ttl |= TLBI_TTL_TG_64K << 2;		\
				break;					\
			}						\
									\
			arg &= ~TLBI_TTL_MASK;				\
			arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);		\
		}							\
									\
		__tlbi(op, arg);					\
	} while (0)

#define __tlbi_user_level(op, arg, level) do {				\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
} while (0)

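/*
 * A minimal illustrative sketch (hypothetical helper, not part of the
 * kernel API): invalidate a single PMD entry for a user address space,
 * hinting level 2 (where the PMD lives with 4K pages), mirroring the
 * pattern used by the flush_tlb_*() helpers below.
 */
static inline void __example_flush_user_pmd(struct mm_struct *mm,
					    unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));

	dsb(ishst);			/* complete prior page-table updates */
	__tlbi_level(vae1is, addr, 2);	/* hint: the entry lives at level 2 */
	__tlbi_user_level(vae1is, addr, 2);
	dsb(ish);			/* wait for the invalidation to finish */
}
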
/*
 * TLB Invalidation
 * ================
 *
 * This header file implements the low-level TLB invalidation routines
 * (sometimes referred to as "flushing" in the kernel) for arm64.
 *
 * Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *	if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *
 * The following functions form part of the "core" TLB invalidation API,
 * as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 *		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->mm'. Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 * Next, we have some undocumented invalidation routines that you probably
 * don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect any walk-cache entries
 *		if 'last_level' is false. 'tlb_level' hints at the page-table
 *		level of the entries being invalidated, or is 0 if unknown.
 *
 *
 * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 * on top of these routines, since that is our interface to the mmu_gather
 * API as used by munmap() and friends.
 */
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = __TLBI_VADDR(0, ASID(mm));

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);
	/* This function is only called on a small page */
	__tlbi_level(vale1is, addr, 3);
	__tlbi_user_level(vale1is, addr, 3);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLBI_OPS	PTRS_PER_PTE
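
/*
 * For example, with 4K pages PTRS_PER_PTE is 512, so a PAGE_SIZE-stride
 * range wider than 2MB (512 * 4K) falls back to a full ASID or kernel
 * flush rather than issuing per-page TLBIs.
 */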

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	unsigned long asid = ASID(vma->vm_mm);
	unsigned long addr;

	start = round_down(start, stride);
	end = round_up(end, stride);

	if ((end - start) >= (MAX_TLBI_OPS * stride)) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	/* Convert the stride into units of 4k */
	stride >>= 12;

	start = __TLBI_VADDR(start, asid);
	end = __TLBI_VADDR(end, asid);

	dsb(ishst);
	for (addr = start; addr < end; addr += stride) {
		if (last_level) {
			__tlbi_level(vale1is, addr, tlb_level);
			__tlbi_user_level(vale1is, addr, tlb_level);
		} else {
			__tlbi_level(vae1is, addr, tlb_level);
			__tlbi_user_level(vae1is, addr, tlb_level);
		}
	}
	dsb(ish);
}
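
/*
 * As an illustrative call (hypothetical caller, not kernel code): a user
 * tearing down a range of PMD-mapped huge pages, where the table structure
 * itself is unchanged, could issue one TLBI per 2MB block, invalidating
 * leaf entries only and hinting that they live at level 2:
 *
 *	__flush_tlb_range(vma, start, end, PMD_SIZE, true, 2);
 */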

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be
	 * invalidating table entries as part of collapsing hugepages or
	 * moving page tables. Pass a tlb_level of 0 because we cannot
	 * determine the level of the entries being invalidated here.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
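
/*
 * Illustrative usage (hypothetical caller): after clearing a kernel PMD
 * that pointed to a page-table page, discard any stale walk-cache entries
 * for that translation:
 *
 *	pmd_clear(pmdp);
 *	__flush_tlb_kernel_pgtable(addr);
 */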
#endif

#endif