/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with one and operation.  If there are pending events,
 * then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
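	/*
	 * If the mask byte is zero (events unmasked), setz/addb leave 2
	 * in %ah, i.e. bit 9 of %rax, which is X86_EFLAGS_IF (0x200);
	 * if events are masked the result is 0.
	 */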
	setz %ah
	addb %ah, %ah
	ret
SYM_FUNC_END(xen_save_fl_direct)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
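	/*
	 * Save every register the SysV ABI lets a called C function
	 * clobber; callers of check_events do not expect any register
	 * to change across it.
	 */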
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	ret
SYM_FUNC_END(check_events)

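/*
 * Instead of reading %cr2 itself, the PV kernel fetches the faulting
 * address that Xen saved in vcpu_info->arch.cr2 (through the xen_vcpu
 * pointer here, or via the percpu mapping in the _direct variant).
 */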
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct);

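/*
 * Xen delivers exceptions to a 64-bit PV guest with %rcx and %r11
 * saved on the stack above the iret-like frame; pop them so the
 * native (or xenpv_) handler sees the hardware-style frame it expects.
 */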
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	UNWIND_HINT_EMPTY
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_xenpv_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

	__INIT
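/*
 * Early exception stubs: each entry pops the %rcx/%r11 pair that Xen
 * put on top of the frame, then jumps to the corresponding native
 * early_idt_handler_array entry.
 */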
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	UNWIND_HINT_EMPTY
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
SYM_CODE_START(xen_iret)
	UNWIND_HINT_EMPTY
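	/* Fill in the "flags" word of the frame shown above (no flags set). */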
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_iret)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_CODE_START(xen_syscall_target)
	UNWIND_HINT_EMPTY
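	/*
	 * Pop the %rcx/%r11 pair that Xen saved above the iret frame
	 * (see the layout above) so register state matches what the
	 * native code at entry_SYSCALL_64_after_hwframe expects.
	 */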
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_syscall32_target)
	UNWIND_HINT_EMPTY
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_syscall32_target)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_sysenter_target)
	UNWIND_HINT_EMPTY
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

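/*
 * Without IA32 emulation the compat entry points are stubs: fail the
 * call with -ENOSYS and return to user mode via the iret hypercall.
 */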
SYM_CODE_START(xen_syscall32_target)
SYM_CODE_START(xen_sysenter_target)
	UNWIND_HINT_EMPTY
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_sysenter_target)
SYM_CODE_END(xen_syscall32_target)

#endif /* CONFIG_IA32_EMULATION */