#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
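
/*
 * Illustrative usage sketch, not part of this header: INVPCID raises #UD
 * on CPUs that do not advertise the feature, so callers are expected to
 * gate the helpers above on X86_FEATURE_INVPCID (as __native_flush_tlb_global()
 * below does).  The function name here is hypothetical.
 */
static inline void example_invpcid_flush_user_addr(unsigned long pcid,
						   unsigned long addr)
{
	/* Only issue INVPCID when the CPU actually supports it. */
	if (static_cpu_has(X86_FEATURE_INVPCID))
		invpcid_flush_one(pcid, addr);
}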

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	int state;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
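
/*
 * Illustrative sketch, hypothetical helper not part of this header: code
 * that wants to know the state of a CR4 bit on this CPU should consult
 * the per-CPU shadow via cr4_read_shadow() rather than reading the
 * register, e.g. to check whether global pages are enabled.  The result
 * is only stable while the caller cannot migrate (preemption off).
 */
static inline bool example_cr4_pge_enabled(void)
{
	return !!(cr4_read_shadow() & X86_CR4_PGE);
}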

/*
 * Save some of cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPU's that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	native_write_cr3(native_read_cr3());
	preempt_enable();
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
};

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);
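
/*
 * Illustrative sketch, hypothetical helper not part of this header: a
 * remote shootdown packages the target mm and virtual address range into
 * a struct flush_tlb_info and hands it to native_flush_tlb_others()
 * together with the set of CPUs that may be caching that mm.  Real
 * callers such as flush_tlb_mm_range() also flush the local CPU and skip
 * the IPI when no other CPU has the mm loaded.
 */
static inline void example_shootdown_range(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	struct flush_tlb_info info = {
		.mm	= mm,
		.start	= start,
		.end	= end,
	};

	native_flush_tlb_others(mm_cpumask(mm), &info);
}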

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.loaded_mm, &init_mm);

	WARN_ON(read_cr3() != __pa_symbol(swapper_pg_dir));
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
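
/*
 * Illustrative sketch, hypothetical helper not part of this header:
 * reclaim-style batching records, for each mm a page is unmapped from,
 * which CPUs might still hold stale TLB entries, then issues a single
 * shootdown for the whole batch instead of one per page.
 */
static inline void example_batched_unmap_flush(struct arch_tlbflush_unmap_batch *batch,
					       struct mm_struct *mm)
{
	arch_tlbbatch_add_mm(batch, mm);	/* accumulate CPUs to flush */
	/* ... unmap more pages, possibly from other mms ... */
	arch_tlbbatch_flush(batch);		/* one flush for the whole batch */
}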

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */