/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>

/*
 * Enable events. This clears the event mask and tests the pending
 * event status with a single 'and' operation. If there are pending
 * events, then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts. The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value. We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined. We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah	/* %ah = 1 if events are unmasked, else 0 */
	addb %ah, %ah	/* shift into bit 1 of %ah, i.e. X86_EFLAGS_IF in %eax */
	ret
SYM_FUNC_END(xen_save_fl_direct)


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte. After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
	testw $X86_EFLAGS_IF, %di
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts. The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/*
	 * Check for unmasked and pending: the low byte of this word is the
	 * pending flag, the high byte is the event mask, so only 0x0001
	 * means "pending and unmasked".
	 */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_restore_fl_direct)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	ret
SYM_FUNC_END(check_events)

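/*
 * Xen stores the fault address of a PV guest in vcpu_info->arch.cr2
 * instead of the real %cr2.  xen_read_cr2 fetches it through the
 * xen_vcpu pointer; xen_read_cr2_direct reads the percpu xen_vcpu_info
 * copy directly.
 */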
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct);

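/*
 * On 64-bit PV exception entry Xen leaves %r11 and %rcx on top of the
 * usual iret-style frame.  These stubs pop the two extra registers and
 * then continue at the regular exception entry point.
 */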
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

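/*
 * Early IDT variants of the stubs above: strip the %rcx/%r11 pair added
 * by Xen and jump to the matching early_idt_handler_array slot.  Each
 * stub is padded with 0xcc (int3) up to XEN_EARLY_IDT_HANDLER_SIZE so
 * all entries stay a fixed size apart.
 */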
	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

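/*
 * Each hypercall in the shared hypercall page occupies a 32-byte slot;
 * hypercall_iret is the address of the HYPERVISOR_iret entry.  The
 * frame it operates on is described below.
 */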
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip <-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
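/*
 * xen_iret: the hardware iret frame is already on the stack; push the
 * zero "flags" word that completes the layout above, then let Xen do
 * the actual return via the iret hypercall.
 */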
SYM_CODE_START(xen_iret)
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_iret)

SYM_CODE_START(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 *
	 * tss.sp2 is scratch space.
	 */
	movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	pushq $__USER_DS
	pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
	jmp hypercall_iret
SYM_CODE_END(xen_sysret64)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_FUNC_START(xen_syscall_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_FUNC_END(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_FUNC_START(xen_syscall32_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_FUNC_END(xen_syscall32_target)

/* 32-bit compat sysenter target */
SYM_FUNC_START(xen_sysenter_target)
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us. This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were. The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_FUNC_END(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

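/*
 * Without IA32 emulation there is nothing to hand these off to: strip
 * the %rcx/%r11 pair left by Xen, fail the call with -ENOSYS and return
 * to user mode through the iret hypercall.
 */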
SYM_FUNC_START_ALIAS(xen_syscall32_target)
SYM_FUNC_START(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
SYM_FUNC_END(xen_sysenter_target)
SYM_FUNC_END_ALIAS(xen_syscall32_target)

#endif /* CONFIG_IA32_EMULATION */