/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)	asm ("tlbi " #op "\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : )

#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0\n"		       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

#define __tlbi_user(op, arg) do {					\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi(op, (arg) | USER_ASID_FLAG);			\
} while (0)

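/*
 * Illustrative only (not an additional API): pick the form of __tlbi()
 * matching the TLBI operation, and mirror ASID/VA-based operations with
 * __tlbi_user() so that the KPTI user ASID is invalidated as well, e.g.:
 *
 *	__tlbi(vmalle1is);		// operation without an argument
 *	__tlbi(aside1is, asid);		// operation taking a register operand
 *	__tlbi_user(aside1is, asid);	// repeat against the user ASID (KPTI)
 *
 * where 'asid' is an operand built by __TLBI_VADDR() below, as done in
 * flush_tlb_mm().
 */
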
/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})

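/*
 * Worked illustration (hypothetical values): formatting page address
 * 0x12345000 for ASID 1 places VA[55:12] in bits [43:0] of the operand
 * and the ASID in bits [63:48]:
 *
 *	__TLBI_VADDR(0x12345000UL, 1)
 *		== ((0x12345000UL >> 12) & GENMASK_ULL(43, 0)) | (1UL << 48)
 *		== 0x0001000000012345
 */
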
/*
 * Level-based TLBI operations.
 *
 * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
 * the level at which the invalidation must take place. If the level is
 * wrong, no invalidation may take place. In the case where the level
 * cannot be easily determined, a 0 value for the level parameter will
 * perform a non-hinted invalidation.
 *
 * For Stage-2 invalidation, use the level values provided to that effect
 * in asm/stage2_pgtable.h.
 */
#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)
#define TLBI_TTL_TG_4K		1
#define TLBI_TTL_TG_16K		2
#define TLBI_TTL_TG_64K		3

#define __tlbi_level(op, addr, level)					\
	do {								\
		u64 arg = addr;						\
									\
		if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&	\
		    level) {						\
			u64 ttl = level & 3;				\
									\
			switch (PAGE_SIZE) {				\
			case SZ_4K:					\
				ttl |= TLBI_TTL_TG_4K << 2;		\
				break;					\
			case SZ_16K:					\
				ttl |= TLBI_TTL_TG_16K << 2;		\
				break;					\
			case SZ_64K:					\
				ttl |= TLBI_TTL_TG_64K << 2;		\
				break;					\
			}						\
									\
			arg &= ~TLBI_TTL_MASK;				\
			arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);		\
		}							\
									\
		__tlbi(op, arg);					\
	} while (0)

#define __tlbi_user_level(op, arg, level) do {				\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
} while (0)

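/*
 * Worked example (4K granule, illustrative only): a last-level (level 3)
 * invalidation encodes ttl = (TLBI_TTL_TG_4K << 2) | 3 = 0b0111, which
 * __tlbi_level() places into bits [47:44] of the VA operand:
 *
 *	__tlbi_level(vale1is, __TLBI_VADDR(addr, asid), 3);
 *	__tlbi_user_level(vale1is, __TLBI_VADDR(addr, asid), 3);
 */
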
/*
 * TLB Invalidation
 * ================
 *
 * This header file implements the low-level TLB invalidation routines
 * (sometimes referred to as "flushing" in the kernel) for arm64.
 *
 * Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *	if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *
 * The following functions form part of the "core" TLB invalidation API,
 * as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 *		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->vm_mm'. Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 * Next, we have some undocumented invalidation routines that you probably
 * don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect any walk-cache entries
 *		if 'last_level' is false. 'tlb_level' provides the level hint
 *		for the level-based TLBI operations (0 when unknown).
 *
 *
 * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 * on top of these routines, since that is our interface to the mmu_gather
 * API as used by munmap() and friends.
 */
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = __TLBI_VADDR(0, ASID(mm));

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);
	/* Only called on small (PAGE_SIZE) mappings, hence the level 3 hint */
	__tlbi_level(vale1is, addr, 3);
	__tlbi_user_level(vale1is, addr, 3);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}

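/*
 * The _nosync variant lets a hypothetical caller (sketch only; 'pages' and
 * 'nr' are illustrative) batch several broadcast invalidations behind a
 * single completion barrier:
 *
 *	for (i = 0; i < nr; i++)
 *		flush_tlb_page_nosync(vma, pages[i]);
 *	dsb(ish);
 */
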
/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLBI_OPS	PTRS_PER_PTE

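/*
 * Rough numbers, assuming a 4K granule (PTRS_PER_PTE == 512): iterating over
 * more than 512 single-page operations, i.e. a range larger than 2MB, is
 * considered not worth it and falls back to invalidating the whole ASID
 * (or the whole TLB for kernel ranges).
 */
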
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	unsigned long asid = ASID(vma->vm_mm);
	unsigned long addr;

	start = round_down(start, stride);
	end = round_up(end, stride);

	if ((end - start) >= (MAX_TLBI_OPS * stride)) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	/* Convert the stride into units of 4k */
	stride >>= 12;

	start = __TLBI_VADDR(start, asid);
	end = __TLBI_VADDR(end, asid);

	dsb(ishst);
	for (addr = start; addr < end; addr += stride) {
		if (last_level) {
			__tlbi_level(vale1is, addr, tlb_level);
			__tlbi_user_level(vale1is, addr, tlb_level);
		} else {
			__tlbi_level(vae1is, addr, tlb_level);
			__tlbi_user_level(vae1is, addr, tlb_level);
		}
	}
	dsb(ish);
}

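/*
 * A sketch of a stride-based, leaf-only call (illustrative, not a helper
 * defined here): for a range known to be mapped solely by PMD-level block
 * entries, a caller such as tlb_flush() in asm/tlb.h can step at PMD_SIZE
 * granularity and pass the matching level hint:
 *
 *	__flush_tlb_range(vma, start, end, PMD_SIZE, true, 2);
 */
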
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 * Pass 0 for tlb_level, since the level cannot easily be determined here.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
#endif

#endif