// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#include "x86.h"
#include "xen.h"
#include "hyperv.h"

#include <linux/kvm_host.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>

#include "trace.h"

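/*
 * Deferred static key gating the Xen MSR/hypercall paths.  It is bumped
 * in kvm_xen_hvm_config() when a VM registers the Xen hypercall MSR and
 * dropped (with an HZ grace period, to avoid branch-patching churn on
 * short-lived VMs) when the MSR is cleared or the VM is destroyed.
 */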
DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

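/*
 * Set up the gfn_to_hva cache for the guest's shared_info page and
 * publish the wall clock at the offset appropriate for the guest's
 * addressing mode.
 */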
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	gpa_t gpa = gfn_to_gpa(gfn);
	int wc_ofs, sec_hi_ofs;
	int ret;
	int idx = srcu_read_lock(&kvm->srcu);

	ret = kvm_gfn_to_hva_cache_init(kvm, &kvm->arch.xen.shinfo_cache,
					gpa, PAGE_SIZE);
	if (ret)
		goto out;

	kvm->arch.xen.shinfo_set = true;

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

	/* 32-bit location by default */
	wc_ofs = offsetof(struct compat_shared_info, wc);
	sec_hi_ofs = offsetof(struct compat_shared_info, arch.wc_sec_hi);

#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (kvm->arch.xen.long_mode) {
		wc_ofs = offsetof(struct shared_info, wc);
		sec_hi_ofs = offsetof(struct shared_info, wc_sec_hi);
	}
#endif

	kvm_write_wall_clock(kvm, gpa + wc_ofs, sec_hi_ofs - wc_ofs);
	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}

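/*
 * Test whether an event channel upcall is pending for this vCPU, by
 * reading the evtchn_upcall_pending byte in its vcpu_info.
 */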
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	u8 rc = 0;

	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */
	struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache;
	struct kvm_memslots *slots = kvm_memslots(v->kvm);
	unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending);

	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending));

	/*
	 * For efficiency, this mirrors the checks for using the valid
	 * cache in kvm_read_guest_offset_cached(), but just uses
	 * __get_user() instead, and falls back to the slow path when
	 * the cache cannot be used.
	 */
	if (likely(slots->generation == ghc->generation &&
		   !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
		/* Fast path */
		__get_user(rc, (u8 __user *)ghc->hva + offset);
	} else {
		/* Slow path */
		kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
					     sizeof(rc));
	}

	return rc;
}

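/*
 * VM-wide Xen attributes, backing the KVM_XEN_HVM_SET_ATTR ioctl.
 * Illustrative userspace usage (names per the uapi headers, shown here
 * only as a sketch):
 *
 *	struct kvm_xen_hvm_attr ha = {
 *		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
 *		.u.shared_info.gfn = shinfo_gfn,
 *	};
 *	ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha);
 */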
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector < 0x10) {
			r = -EINVAL;
		} else {
			kvm->arch.xen.upcall_vector = data->u.vector;
			r = 0;
		}
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

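/*
 * Read back the VM-wide attributes (KVM_XEN_HVM_GET_ATTR), mirroring
 * the setter above.
 */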
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_set) {
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

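/*
 * Per-vCPU Xen attributes (KVM_XEN_VCPU_SET_ATTR).  The SRCU read lock
 * is held across the gfn_to_hva cache initialisation, which looks up
 * the memslots.
 */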
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));

		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_info_cache,
					      data->u.gpa,
					      sizeof(struct vcpu_info));
		if (!r) {
			vcpu->arch.xen.vcpu_info_set = true;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_time_info_cache,
					      data->u.gpa,
					      sizeof(struct pvclock_vcpu_time_info));
		if (!r) {
			vcpu->arch.xen.vcpu_time_info_set = true;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		}
		break;

	default:
		break;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

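/* Read back the per-vCPU attributes set above (KVM_XEN_VCPU_GET_ATTR). */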
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_set) {
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_set) {
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
			r = 0;
		}
		break;

	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

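/*
 * Called when the guest writes the hypercall-page MSR that userspace
 * registered via KVM_XEN_HVM_CONFIG.  The written value carries the
 * target GPA in its upper bits and the page index in the low bits.
 */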
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch.  Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;

		if (page_num)
			return 1;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		kvm_x86_ops.patch_hypercall(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

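		/*
		 * Each 32-byte slot i becomes "mov $i, %eax; vmcall; ret",
		 * matching the Xen ABI in which hypercall i is invoked by
		 * calling hypercall_page + i * 32.
		 */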
		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
			kfree(page);
			return 1;
		}
	}
	return 0;
}

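/*
 * Validate and install the VM's Xen hypercall configuration
 * (KVM_XEN_HVM_CONFIG), keeping the kvm_xen_enabled static key in sync
 * with whether an MSR is registered.
 */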
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->lock);
	return 0;
}

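/* Drop the static key reference taken when the MSR was configured. */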
void kvm_xen_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}

static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}

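/*
 * Completion callback run after userspace has handled a KVM_EXIT_XEN
 * hypercall exit: propagate the result into the guest's RAX, unless RIP
 * no longer points at the hypercall (presumably because vCPU state was
 * changed in the meantime), in which case the result is dropped.
 */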
static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;

	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

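/*
 * Intercepted Xen hypercalls are not handled in the kernel: marshal the
 * input and arguments, exit to userspace with KVM_EXIT_XEN, and arrange
 * for kvm_xen_hypercall_complete_userspace() to run on re-entry.
 */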
int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6];

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu->kvm))
		return kvm_hv_hypercall(vcpu);

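	/*
	 * Marshal the arguments per the Xen calling convention:
	 * ebx/ecx/edx/esi/edi/ebp for 32-bit guests, and
	 * rdi/rsi/rdx/r10/r8/r9 for 64-bit guests.
	 */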
	longmode = is_64_bit_mode(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = kvm_x86_ops.get_cpl(vcpu);
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}