/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/segment.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

#define VCPU_RAX        __VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX        __VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX        __VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX        __VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP        __VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI        __VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI        __VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8         __VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9         __VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10        __VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11        __VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12        __VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13        __VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14        __VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15        __VCPU_REGS_R15 * WORD_SIZE
#endif
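
/*
 * The VCPU_* values above are byte offsets into the @regs array passed to
 * __vmx_vcpu_run(), i.e. into vcpu->arch.regs[].  Worked example: with
 * BITS_PER_LONG == 64, WORD_SIZE is 8 and __VCPU_REGS_RCX is 1, so
 * VCPU_RCX(%_ASM_AX) addresses regs[1] at byte offset 8.
 */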

.section .noinstr.text, "ax"

/**
 * vmx_vmenter - VM-Enter the currently loaded VMCS
 *
 * %RFLAGS.ZF: !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
 *
 * Returns:
 *      %RFLAGS.CF is set on VM-Fail Invalid
 *      %RFLAGS.ZF is set on VM-Fail Valid
 *      %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * Note that VMRESUME/VMLAUNCH fall through and return directly if
 * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
 * to vmx_vmexit.
 */
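/*
 * __vmx_vcpu_run() sets RFLAGS.ZF with "testb %bl, %bl" on @launched just
 * before CALLing this routine.  On a successful VM-Enter, the CPU later
 * "returns" through VMCS.HOST_RIP (vmx_vmexit) with VMCS.HOST_RSP pointing
 * at the return address pushed by that CALL, so the RET in vmx_vmexit
 * resumes __vmx_vcpu_run() as if vmx_vmenter had returned normally.
 */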
SYM_FUNC_START_LOCAL(vmx_vmenter)
        /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
        je 2f

1:      vmresume
        ret

2:      vmlaunch
        ret

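        /*
         * Fixup target for the _ASM_EXTABLE entries below: a faulting
         * VMRESUME/VMLAUNCH lands here.  The fault is tolerated (plain RET)
         * only if kvm_rebooting is set, e.g. when VMX is being torn down for
         * an emergency reboot; otherwise hit UD2 to flag the unexpected fault.
         */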
3:      cmpb $0, kvm_rebooting
        je 4f
        ret
4:      ud2

        _ASM_EXTABLE(1b, 3b)
        _ASM_EXTABLE(2b, 3b)

SYM_FUNC_END(vmx_vmenter)

/**
 * vmx_vmexit - Handle a VMX VM-Exit
 *
 * Returns:
 *      %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * This is vmx_vmenter's partner in crime.  On a VM-Exit, control will jump
 * here after hardware loads the host's state, i.e. this is the destination
 * referred to by VMCS.HOST_RIP.
 */
SYM_FUNC_START(vmx_vmexit)
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
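        /*
         * The ALTERNATIVE above NOPs out the "jmp" when X86_FEATURE_RETPOLINE
         * is set, so the RSB is stuffed only on retpoline-enabled kernels;
         * otherwise the entire block below is skipped.
         */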
        /* Preserve guest's RAX, it's used to stuff the RSB. */
        push %_ASM_AX

        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE

        /*
         * Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail.
         * OR-ing a non-zero value into RAX clears CF and yields a non-zero
         * result, clearing ZF as well.
         */
        or $1, %_ASM_AX

        pop %_ASM_AX
.Lvmexit_skip_rsb:
#endif
        ret
SYM_FUNC_END(vmx_vmexit)

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:        struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
 * @regs:       unsigned long * (to guest registers)
 * @launched:   %true if the VMCS has been launched
 *
 * Returns:
 *      0 on VM-Exit, 1 on VM-Fail
 */
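/*
 * For reference, the main C caller (vmx.c) invokes this along the lines of
 * the sketch below; the exact form varies by kernel version:
 *
 *      vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
 *                                 vmx->loaded_vmcs->launched);
 */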
SYM_FUNC_START(__vmx_vcpu_run)
        push %_ASM_BP
        mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
        push %r15
        push %r14
        push %r13
        push %r12
#else
        push %edi
        push %esi
#endif
        push %_ASM_BX

        /*
         * Save @regs: _ASM_ARG2 may be modified by vmx_update_host_rsp(), and
         * @regs is needed after VM-Exit to save the guest's register values.
         */
        push %_ASM_ARG2

        /* Copy @launched to BL, _ASM_ARG3 is volatile. */
        mov %_ASM_ARG3B, %bl

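        /*
         * vmx_update_host_rsp() (see vmx.c) writes VMCS.HOST_RSP only if the
         * value passed in differs from the cached host RSP, keeping the
         * VMWRITE off the hot path in the common case.
         */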
        /* Adjust RSP to account for the CALL to vmx_vmenter(). */
        lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
        call vmx_update_host_rsp

        /* Load @regs to RAX. */
        mov (%_ASM_SP), %_ASM_AX

        /* Check if vmlaunch or vmresume is needed */
        testb %bl, %bl
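        /* ZF == 1 iff @launched == 0, i.e. vmx_vmenter will use VMLAUNCH. */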

        /* Load guest registers.  Don't clobber flags. */
        mov VCPU_RCX(%_ASM_AX), %_ASM_CX
        mov VCPU_RDX(%_ASM_AX), %_ASM_DX
        mov VCPU_RBX(%_ASM_AX), %_ASM_BX
        mov VCPU_RBP(%_ASM_AX), %_ASM_BP
        mov VCPU_RSI(%_ASM_AX), %_ASM_SI
        mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
        mov VCPU_R8 (%_ASM_AX), %r8
        mov VCPU_R9 (%_ASM_AX), %r9
        mov VCPU_R10(%_ASM_AX), %r10
        mov VCPU_R11(%_ASM_AX), %r11
        mov VCPU_R12(%_ASM_AX), %r12
        mov VCPU_R13(%_ASM_AX), %r13
        mov VCPU_R14(%_ASM_AX), %r14
        mov VCPU_R15(%_ASM_AX), %r15
#endif
        /* Load guest RAX.  This kills the @regs pointer! */
        mov VCPU_RAX(%_ASM_AX), %_ASM_AX

        /* Enter guest mode */
        call vmx_vmenter

        /* Jump on VM-Fail. */
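        /* JBE is taken iff CF or ZF is set, i.e. on VM-Fail Invalid or Valid. */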
        jbe 2f

        /* Temporarily save guest's RAX. */
        push %_ASM_AX

        /* Reload @regs to RAX. */
        mov WORD_SIZE(%_ASM_SP), %_ASM_AX

        /* Save all guest registers, including RAX from the stack */
        pop VCPU_RAX(%_ASM_AX)
        mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
        mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
        mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
        mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
        mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
        mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
        mov %r8,  VCPU_R8 (%_ASM_AX)
        mov %r9,  VCPU_R9 (%_ASM_AX)
        mov %r10, VCPU_R10(%_ASM_AX)
        mov %r11, VCPU_R11(%_ASM_AX)
        mov %r12, VCPU_R12(%_ASM_AX)
        mov %r13, VCPU_R13(%_ASM_AX)
        mov %r14, VCPU_R14(%_ASM_AX)
        mov %r15, VCPU_R15(%_ASM_AX)
#endif

        /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
        xor %eax, %eax

        /*
         * Clear all general purpose registers except RSP and RAX to prevent
         * speculative use of the guest's values, even those that are reloaded
         * via the stack.  In theory, an L1 cache miss when restoring registers
         * could lead to speculative execution with the guest's values.
         * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
         * free.  RSP and RAX are exempt as RSP is restored by hardware during
         * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
         */
1:      xor %ecx, %ecx
        xor %edx, %edx
        xor %ebx, %ebx
        xor %ebp, %ebp
        xor %esi, %esi
        xor %edi, %edi
#ifdef CONFIG_X86_64
        xor %r8d,  %r8d
        xor %r9d,  %r9d
        xor %r10d, %r10d
        xor %r11d, %r11d
        xor %r12d, %r12d
        xor %r13d, %r13d
        xor %r14d, %r14d
        xor %r15d, %r15d
#endif

        /* "POP" @regs. */
        add $WORD_SIZE, %_ASM_SP
        pop %_ASM_BX

#ifdef CONFIG_X86_64
        pop %r12
        pop %r13
        pop %r14
        pop %r15
#else
        pop %esi
        pop %edi
#endif
        pop %_ASM_BP
        ret

        /* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
2:      mov $1, %eax
        jmp 1b
SYM_FUNC_END(__vmx_vcpu_run)

.section .text, "ax"

/**
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field:      VMCS field encoding that failed
 * @fault:      %true if the VMREAD faulted, %false if it failed
 *
 * Save and restore volatile registers across a call to vmread_error().  Note,
 * all parameters are passed on the stack.
 */
SYM_FUNC_START(vmread_error_trampoline)
        push %_ASM_BP
        mov %_ASM_SP, %_ASM_BP

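        /*
         * With the frame set up, the stack-passed parameters live at
         * 2*WORD_SIZE(%_ASM_BP) = @field and 3*WORD_SIZE(%_ASM_BP) = @fault
         * (saved RBP at 0*WORD_SIZE, return address at 1*WORD_SIZE).
         */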
        push %_ASM_AX
        push %_ASM_CX
        push %_ASM_DX
#ifdef CONFIG_X86_64
        push %rdi
        push %rsi
        push %r8
        push %r9
        push %r10
        push %r11
#endif
#ifdef CONFIG_X86_64
        /* Load @field and @fault to arg1 and arg2 respectively. */
        mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
        mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
#else
        /* Parameters are passed on the stack for 32-bit (see asmlinkage). */
        push 3*WORD_SIZE(%ebp)
        push 2*WORD_SIZE(%ebp)
#endif

        call vmread_error

#ifndef CONFIG_X86_64
        add $8, %esp
#endif

        /* Zero out @fault, which will be popped into the result register. */
        _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)

#ifdef CONFIG_X86_64
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rsi
        pop %rdi
#endif
        pop %_ASM_DX
        pop %_ASM_CX
        pop %_ASM_AX
        pop %_ASM_BP

        ret
SYM_FUNC_END(vmread_error_trampoline)

SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
        /*
         * Unconditionally create a stack frame; getting the correct RSP on the
         * stack (for x86-64) would take two instructions anyway, and RBP can
         * be used to restore RSP to make objtool happy (see below).
         */
        push %_ASM_BP
        mov %_ASM_SP, %_ASM_BP

#ifdef CONFIG_X86_64
        /*
         * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
         * creating the synthetic interrupt stack frame for the IRQ/NMI.
         */
        and $-16, %rsp
        push $__KERNEL_DS
        push %rbp
#endif
        pushf
        push $__KERNEL_CS
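        /*
         * The CALL below pushes the return RIP, completing the frame the
         * handler's IRET expects: SS:RSP (64-bit only), RFLAGS, CS and RIP.
         */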
        CALL_NOSPEC _ASM_ARG1

        /*
         * "Restore" RSP from RBP, even though IRET has already unwound RSP to
         * the correct value.  objtool doesn't know the callee will IRET and,
         * without the explicit restore, thinks the stack is getting walloped.
         * Using an unwind hint is problematic due to x86-64's dynamic alignment.
         */
        mov %_ASM_BP, %_ASM_SP
        pop %_ASM_BP
        ret
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)