#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
#ifdef CONFIG_SMP
	struct mm_struct *active_mm;
	int state;
#endif

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set the given bits in this CPU's CR4 (and its shadow). */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear the given bits in this CPU's CR4 (and its shadow). */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

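/* Toggle the given bits in this CPU's CR4 and in its per-CPU shadow. */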
static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of the CR4 feature set we're using (e.g. the Pentium 4MB
 * enable and the PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot CPU.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

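/*
 * Set bits in CR4 and record them in mmu_cr4_features (and its trampoline
 * copy, if present) so that later-booting CPUs come up with the same flags.
 */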
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

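/* Flush all non-global TLB entries on the local CPU by rewriting CR3. */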
static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	native_write_cr3(native_read_cr3());
	preempt_enable();
}

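/*
 * Flush everything, including global mappings, by toggling CR4.PGE.
 * The caller must have interrupts disabled.
 */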
static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

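/* Flush a single address on the local CPU with INVLPG. */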
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

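/*
 * Flush everything on the local CPU, using a global flush when the CPU
 * supports global pages (PGE).
 */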
static inline void __flush_tlb_all(void)
{
	if (static_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();
}

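/* Flush one address on the local CPU and bump the VM TLB statistics. */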
static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct's TLB entries
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other CPUs
 *
 * ...but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

#ifndef CONFIG_SMP

/* "_up" is for UniProcessor.
 *
 * This is a helper for other header functions. *Not* intended to be called
 * directly. All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}

static inline void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}

static inline void flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#else /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb()	flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm,
				unsigned long start, unsigned long end);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif /* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */