/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities.) rflags is
   clobbered. Leftover arguments are passed on the stack. )

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] For struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits in width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed on the stack. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

 */
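
/*
 * Illustrative example (added commentary, not part of the original file):
 * given the 64-bit convention above, a call to
 *
 *	long fn(long a, long b, long c);
 *
 * compiles to roughly:
 *
 *	movq	$1, %rdi	# a: 1st argument
 *	movq	$2, %rsi	# b: 2nd argument
 *	movq	$3, %rdx	# c: 3rd argument
 *	call	fn		# return value comes back in %rax
 *
 * fn() may clobber r10/r11 freely, but must preserve rbx, rbp and r12-r15
 * (saving/restoring them if used). On 32-bit with -mregparm=3 the same
 * call would pass a/b/c in eax/edx/ecx.
 */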

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8

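/*
 * Usage sketch (illustrative, not part of the original file): with a
 * pt_regs frame at the top of the stack, the saved user registers can be
 * addressed through these offsets, e.g.:
 *
 *	movq	RDI(%rsp), %rdi		# reload the saved user %rdi
 *	movq	%rax, RAX(%rsp)		# store a syscall return value
 */
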
.macro ALLOC_PT_GPREGS_ON_STACK
	addq	$-(15*8), %rsp
.endm

.macro SAVE_REGS offset=0
	movq %rdi, 14*8+\offset(%rsp)
	movq %rsi, 13*8+\offset(%rsp)
	movq %rdx, 12*8+\offset(%rsp)
	movq %rcx, 11*8+\offset(%rsp)
	movq %rax, 10*8+\offset(%rsp)
	movq %r8,  9*8+\offset(%rsp)
	movq %r9,  8*8+\offset(%rsp)
	movq %r10, 7*8+\offset(%rsp)
	movq %r11, 6*8+\offset(%rsp)
	movq %rbx, 5*8+\offset(%rsp)
	movq %rbp, 4*8+\offset(%rsp)
	movq %r12, 3*8+\offset(%rsp)
	movq %r13, 2*8+\offset(%rsp)
	movq %r14, 1*8+\offset(%rsp)
	movq %r15, 0*8+\offset(%rsp)
	UNWIND_HINT_REGS offset=\offset
.endm

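/*
 * Typical entry-path pairing (illustrative sketch, not from the original
 * file): carve out space for the 15 GP registers, then fill it in:
 *
 *	ALLOC_PT_GPREGS_ON_STACK
 *	SAVE_REGS
 *
 * Afterwards %rsp points at the R15 slot and the frame matches the
 * GP-register portion of struct pt_regs.
 */
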
/*
 * Sanitize registers of values that a speculation attack
 * might otherwise want to exploit. The lower registers are
 * likely clobbered well before they could be put to use in
 * a speculative execution gadget:
 */
.macro CLEAR_REGS_NOSPEC
	xorl %ebp, %ebp
	xorl %ebx, %ebx
	xorq %r8, %r8
	xorq %r9, %r9
	xorq %r10, %r10
	xorq %r11, %r11
	xorq %r12, %r12
	xorq %r13, %r13
	xorq %r14, %r14
	xorq %r15, %r15
.endm

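/*
 * Note (added for clarity): the xorl forms are deliberate - a 32-bit xor
 * zero-extends into the full 64-bit register with a shorter encoding, so
 * clearing %ebp/%ebx still wipes all of %rbp/%rbx. r8-r15 need a REX
 * prefix either way.
 */
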
.macro POP_REGS pop_rdi=1 skip_r11rcx=0
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	.if \skip_r11rcx
	popq %rsi
	.else
	popq %r11
	.endif
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	.if \skip_r11rcx
	popq %rsi
	.else
	popq %rcx
	.endif
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm

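/*
 * Usage notes (added commentary, not from the original file):
 *
 * - skip_r11rcx=1 is for the SYSRET fast path: SYSRET reloads %rcx and
 *   %r11 from RIP and EFLAGS anyway, so the saved values are popped into
 *   %rsi purely to discard them while keeping %rsp advancing.
 * - pop_rdi=0 leaves the %rdi slot on the stack, e.g. so %rdi can serve
 *   as a scratch register for a CR3 switch before it is finally restored.
 */
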
/* The undocumented single-byte ICEBP instruction (a.k.a. INT1), raises #DB: */
.macro icebp
	.byte 0xf1
.endm

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_REGS because it corrupts
 * the original rbp.
 */
.macro ENCODE_FRAME_POINTER ptregs_offset=0
#ifdef CONFIG_FRAME_POINTER
	.if \ptregs_offset
		leaq \ptregs_offset(%rsp), %rbp
	.else
		mov %rsp, %rbp
	.endif
	orq	$0x1, %rbp
#endif
.endm
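
/*
 * Decoding sketch (illustrative, not from this file): the unwinder undoes
 * the encoding roughly like
 *
 *	if (frame_pointer & 0x1)
 *		regs = (struct pt_regs *)(frame_pointer & ~0x1UL);
 */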

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
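
/*
 * CR3 layout sketch (added commentary): the user variant of a PTI CR3
 * value is derived from the kernel one by OR-ing in the masks above:
 *
 *	user_cr3 = kernel_cr3 | PTI_USER_PGTABLE_MASK	# PGD bit 12
 *
 * plus PTI_USER_PCID_MASK when PCIDs are in use, selecting the user ASID.
 */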

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and the PAGE_TABLE_ISOLATION PGD bit, point CR3 at the kernel pagetables: */
	andq	$(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
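
/*
 * Usage sketch (illustrative): on kernel entry from userspace, any free
 * GP register can serve as scratch, e.g.:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 *
 * When X86_FEATURE_PTI is not set, the ALTERNATIVE above leaves the
 * initial 'jmp .Lend_\@' in place, so the whole body is skipped.
 */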

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq	$(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
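
/*
 * Background note (added commentary): the TLB-flush code on the C side
 * sets a bit in the per-cpu user_pcid_flush_mask whenever a user ASID's
 * TLB entries are invalidated while the kernel CR3 is live. The exit
 * path above tests (and clears) that bit. Only the low 11 CR3 bits form
 * the ASID proper, since bit 11 (X86_CR3_PTI_PCID_USER_BIT) is claimed
 * by PTI - hence the 0x7FF mask.
 */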

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page tables
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm
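
/*
 * Pairing sketch (illustrative, not from this file): nested/paranoid
 * entry paths save the entry CR3 and restore it verbatim on exit, e.g.:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	... handler runs on kernel page tables ...
 *	RESTORE_CR3 scratch_reg=%rsi save_reg=%r14
 *
 * The register choices here are just examples; any preserved register
 * works for save_reg.
 */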

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

#endif /* CONFIG_X86_64 */

/*
 * This does 'call enter_from_user_mode' unless we can avoid it based on
 * kernel config, or by using the static jump infrastructure.
 */
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef HAVE_JUMP_LABEL
	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
	call enter_from_user_mode
.Lafter_call_\@:
#endif
.endm
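
/*
 * Behavior note (added commentary): with CONFIG_CONTEXT_TRACKING=n the
 * macro expands to nothing. With jump labels available, def=0 means the
 * static branch defaults to skipping the call; it is patched to fall
 * through to 'call enter_from_user_mode' only once context tracking is
 * enabled at runtime. Without jump labels the call is made
 * unconditionally.
 */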