/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm
 * gets its own ASID and we flush and restart numbering when we run out of
 * ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that ran on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still use a single ASID per mm.
 *         Corresponds to kPCID + 2048.
 */
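
/*
 * Illustrative mapping (assuming the values defined below, i.e.
 * TLB_NR_DYN_ASIDS == 6, and KPTI enabled): ASID 0 is loaded as kPCID 1 and
 * uPCID 2049, ASID 5 as kPCID 6 and uPCID 2054.  Without KPTI only the
 * kPCID is ever written into CR3.
 */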

/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS		12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS	1
#else
# define PTI_CONSUMED_PCID_BITS	0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0 -> MAX_AVAIL_ASID are valid. -1 below to account
 * for them being zero-based. Another -1 is because PCID 0 is reserved for
 * use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
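
/*
 * A sketch of the arithmetic (not an additional limit): with
 * X86_CR3_PCID_BITS == 12 and PTI enabled, CR3_AVAIL_PCID_BITS == 11 and
 * MAX_ASID_AVAILABLE == (1 << 11) - 2 == 2046.
 */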

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	/*
	 * Make sure that the dynamic ASID space does not conflict with the
	 * bit we are using to switch between user and kernel ASIDs.
	 */
	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));

	/*
	 * The ASID being passed in here should have respected the
	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
	 */
	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
	/*
	 * The dynamically-assigned ASIDs that get passed in are small
	 * (<TLB_NR_DYN_ASIDS).  They never have the high switch bit set,
	 * so do not bother to clear it.
	 *
	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
	 * PCID bits.  This serves two purposes.  It prevents a nasty
	 * situation in which PCID-unaware code saves CR3, loads some other
	 * value (with PCID == 0), and then restores CR3, thus corrupting
	 * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
	 * that any bugs involving loading a PCID-enabled CR3 with
	 * CR4.PCIDE off will trigger deterministically.
	 */
	return asid + 1;
}

/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
	u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
	return ret;
}
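
/*
 * Illustrative values, assuming X86_CR3_PTI_PCID_USER_BIT == 11:
 * kern_pcid(3) == 4 and, with KPTI built in, user_pcid(3) == 4 | (1 << 11)
 * == 2052 -- i.e. kPCID + 2048, matching the uPCID range described at the
 * top of this file.
 */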

struct pgd_t;
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		return __sme_pa(pgd) | kern_pcid(asid);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(pgd);
	}
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
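
/*
 * A rough sketch of the resulting CR3 layout (see the SDM for the
 * authoritative definition):
 *
 *	bits 11:0	the kPCID (0 when PCID is not in use)
 *	middle bits	the physical address of the top-level page table,
 *			with the SME C-bit folded in by __sme_pa() when
 *			memory encryption is active
 *	bit 63		the "no flush" hint added by build_cr3_noflush()
 */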

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
	/*
	 * If we have PCID, then switching to init_mm is reasonably
	 * fast.  If we don't have PCID, then switching to init_mm is
	 * quite slow, so we try to defer it in the hopes that we can
	 * avoid it entirely.  The latter approach runs the risk of
	 * receiving otherwise unnecessary IPIs.
	 *
	 * This choice is just a heuristic.  The tlb code can handle this
	 * function returning true or false regardless of whether we have
	 * PCID.
	 */
	return !static_cpu_has(X86_FEATURE_PCID);
}

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	u16 loaded_mm_asid;
	u16 next_asid;
	/* last user mm's ctx id */
	u64 last_ctx_id;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * If set, we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date relative to the page
	 * tables as of the tlb_gen recorded in the list.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

static inline void __cr4_set(unsigned long cr4)
{
	lockdep_assert_irqs_disabled();
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4, flags;

	local_irq_save(flags);
	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4)
		__cr4_set(cr4 | mask);
	local_irq_restore(flags);
}
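
/*
 * Typical usage (illustrative; the feature bits come from
 * <asm/processor-flags.h>):
 *
 *	cr4_set_bits(X86_CR4_PCIDE);	// enable PCID, update the shadow
 *	cr4_clear_bits(X86_CR4_PGE);	// drop global pages
 *
 * Both helpers read and update the per-CPU shadow with interrupts disabled,
 * so hardware CR4 and cpu_tlbstate.cr4 cannot go out of sync.
 */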

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4, flags;

	local_irq_save(flags);
	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4)
		__cr4_set(cr4 & ~mask);
	local_irq_restore(flags);
}

static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	__cr4_set(cr4 ^ mask);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Mark all other ASIDs as invalid; the currently loaded ASID is preserved.
 */
static inline void invalidate_other_asid(void)
{
	this_cpu_write(cpu_tlbstate.invalidate_other, true);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

/*
 * Given an ASID, flush the corresponding user ASID.  We can delay this
 * until the next time we switch to it.
 *
 * See SWITCH_TO_USER_CR3.
 */
static inline void invalidate_user_asid(u16 asid)
{
	/* There is no user ASID if address space separation is off */
	if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		return;

	/*
	 * We only have a single ASID if PCID is off and the CR3
	 * write will have flushed it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_PCID))
		return;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	__set_bit(kern_pcid(asid),
		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}
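
/*
 * Note that the mask is indexed by kPCID, not ASID: for ASID 0 this sets
 * bit 1 of user_pcid_flush_mask.  The SWITCH_TO_USER_CR3 assembly tests
 * (and clears) that bit to decide whether the next return to user space
 * needs a flushing CR3 write.  (Summary only; the authoritative logic is
 * in arch/x86/entry/calling.h.)
 */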

/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
	/*
	 * Preemption or interrupts must be disabled to protect the access
	 * to the per CPU variable and to prevent being preempted between
	 * read_cr3() and write_cr3().
	 */
	WARN_ON_ONCE(preemptible());

	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
	native_write_cr3(__native_read_cr3());
}

/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long cr4, flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 *
		 * Note, this works with CR4.PCIDE=0 or 1.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* toggle PGE */
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}

/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
	 * Just use invalidate_user_asid() in case we are called early.
	 */
	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
		invalidate_user_asid(loaded_mm_asid);
	else
		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
}

/*
 * flush everything
 */
static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		__flush_tlb_global();
	} else {
		/*
		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
		 */
		__flush_tlb();
	}
}

/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
	 * but since kernel space is replicated across all ASIDs, we must also
	 * invalidate all the others.
	 */
	invalidate_other_asid();
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
};
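
/*
 * For instance, a full flush of a single mm is described roughly as below
 * (illustrative only; see flush_tlb_mm_range() in arch/x86/mm/tlb.c for
 * the real construction):
 *
 *	struct flush_tlb_info info = {
 *		.mm		= mm,
 *		.start		= 0,
 *		.end		= TLB_FLUSH_ALL,
 *		.new_tlb_gen	= inc_mm_tlb_gen(mm),
 *	};
 */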

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */