// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#include "x86.h"
#include "xen.h"
#include "hyperv.h"

#include <linux/kvm_host.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>

#include "trace.h"

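/*
 * Tracks whether any VM has a Xen hypercall MSR configured: incremented in
 * kvm_xen_hvm_config() when an MSR is first set, and dropped (with an
 * HZ-deferred timeout) when the MSR is cleared or the VM is destroyed.
 */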
DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

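/*
 * Set up the guest's shared_info page at @gfn: prime the gfn_to_hva cache,
 * then publish the wall clock at the layout-dependent offset (32-bit compat
 * vs. 64-bit long mode) and kick a masterclock update.
 */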
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	gpa_t gpa = gfn_to_gpa(gfn);
	int wc_ofs, sec_hi_ofs;
	int ret;
	int idx = srcu_read_lock(&kvm->srcu);

	ret = kvm_gfn_to_hva_cache_init(kvm, &kvm->arch.xen.shinfo_cache,
					gpa, PAGE_SIZE);
	if (ret)
		goto out;

	kvm->arch.xen.shinfo_set = true;

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

	/* 32-bit location by default */
	wc_ofs = offsetof(struct compat_shared_info, wc);
	sec_hi_ofs = offsetof(struct compat_shared_info, arch.wc_sec_hi);

#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (kvm->arch.xen.long_mode) {
		wc_ofs = offsetof(struct shared_info, wc);
		sec_hi_ofs = offsetof(struct shared_info, wc_sec_hi);
	}
#endif

	kvm_write_wall_clock(kvm, gpa + wc_ofs, sec_hi_ofs - wc_ofs);
	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}

int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (kvm->arch.xen.shinfo_set) {
			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
			r = 0;
		}
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

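/*
 * No per-vCPU attribute types are defined yet, so the two handlers below
 * only take the lock and fall through to -ENOENT for everything.
 */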
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

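/*
 * Handle a guest write to the hypercall page MSR. The written value encodes
 * the destination GPA in its upper bits and the page index of the (possibly
 * multi-page) hypercall blob in the low bits.
 */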
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;

		if (page_num)
			return 1;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		kvm_x86_ops.patch_hypercall(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

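		/*
		 * Each 32-byte slot N is thus: "mov $N, %eax" (5 bytes),
		 * the 3-byte VMCALL/VMMCALL patched in above, a RET, and
		 * INT3 padding out to the end of the slot.
		 */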
		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		u64 blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				   : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
			kfree(page);
			return 1;
		}

		/* Don't leak the memdup_user() copy on success either */
		kfree(page);
	}
	return 0;
}

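/*
 * Validate and install the VMM's Xen hypercall configuration, keeping the
 * kvm_xen_enabled static key in step with whether an MSR is configured.
 */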
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->lock);
	return 0;
}

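/* Drop the static key reference if an MSR was still configured at teardown. */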
void kvm_xen_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}

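/*
 * Hypercall results are returned in RAX, and completing a hypercall also
 * skips past the instruction that trapped. Completion from userspace first
 * re-checks that RIP still points at the hypercall that exited.
 */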
static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;

	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

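/*
 * Punt a trapped Xen hypercall out to userspace as a KVM_EXIT_XEN exit,
 * after first letting Hyper-V claim hypercalls with bit 31 set in EAX.
 */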
int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6];

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu->kvm))
		return kvm_hv_hypercall(vcpu);

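	/*
	 * Per the Xen calling convention: 32-bit arguments live in %ebx,
	 * %ecx, %edx, %esi, %edi and %ebp; 64-bit arguments in %rdi, %rsi,
	 * %rdx, %r10, %r8 and %r9.
	 */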
	longmode = is_64_bit_mode(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = kvm_x86_ops.get_cpl(vcpu);
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}