/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

23
#include "x86.h"
#include "lapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <trace/events/kvm.h>

#include "trace.h"
32
33static bool kvm_hv_msr_partition_wide(u32 msr)
34{
35 bool r = false;
36
37 switch (msr) {
38 case HV_X64_MSR_GUEST_OS_ID:
39 case HV_X64_MSR_HYPERCALL:
40 case HV_X64_MSR_REFERENCE_TSC:
41 case HV_X64_MSR_TIME_REF_COUNT:
Andrey Smetanine7d95132015-07-03 15:01:37 +030042 case HV_X64_MSR_CRASH_CTL:
43 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
Andrey Smetanine516ceb2015-09-16 12:29:48 +030044 case HV_X64_MSR_RESET:
Andrey Smetanine83d5882015-07-03 15:01:34 +030045 r = true;
46 break;
47 }
48
49 return r;
50}
51
Andrey Smetanine7d95132015-07-03 15:01:37 +030052static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
53 u32 index, u64 *pdata)
54{
55 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
56
57 if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
58 return -EINVAL;
59
60 *pdata = hv->hv_crash_param[index];
61 return 0;
62}
63
64static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
65{
66 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
67
68 *pdata = hv->hv_crash_ctl;
69 return 0;
70}
71
72static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
73{
74 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
75
76 if (host)
77 hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;
78
79 if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
80
81 vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
82 hv->hv_crash_param[0],
83 hv->hv_crash_param[1],
84 hv->hv_crash_param[2],
85 hv->hv_crash_param[3],
86 hv->hv_crash_param[4]);
87
88 /* Send notification about crash to user space */
89 kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
90 }
91
92 return 0;
93}
94
95static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
96 u32 index, u64 data)
97{
98 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
99
100 if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
101 return -EINVAL;
102
103 hv->hv_crash_param[index] = data;
104 return 0;
105}
106
107static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
108 bool host)
Andrey Smetanine83d5882015-07-03 15:01:34 +0300109{
110 struct kvm *kvm = vcpu->kvm;
111 struct kvm_hv *hv = &kvm->arch.hyperv;
112
113 switch (msr) {
114 case HV_X64_MSR_GUEST_OS_ID:
115 hv->hv_guest_os_id = data;
116 /* setting guest os id to zero disables hypercall page */
117 if (!hv->hv_guest_os_id)
118 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
119 break;
120 case HV_X64_MSR_HYPERCALL: {
121 u64 gfn;
122 unsigned long addr;
123 u8 instructions[4];
124
125 /* if guest os id is not set hypercall should remain disabled */
126 if (!hv->hv_guest_os_id)
127 break;
128 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
129 hv->hv_hypercall = data;
130 break;
131 }
132 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
133 addr = gfn_to_hva(kvm, gfn);
134 if (kvm_is_error_hva(addr))
135 return 1;
136 kvm_x86_ops->patch_hypercall(vcpu, instructions);
137 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
138 if (__copy_to_user((void __user *)addr, instructions, 4))
139 return 1;
140 hv->hv_hypercall = data;
141 mark_page_dirty(kvm, gfn);
142 break;
143 }
144 case HV_X64_MSR_REFERENCE_TSC: {
145 u64 gfn;
146 HV_REFERENCE_TSC_PAGE tsc_ref;
147
148 memset(&tsc_ref, 0, sizeof(tsc_ref));
149 hv->hv_tsc_page = data;
150 if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
151 break;
152 gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
153 if (kvm_write_guest(
154 kvm,
155 gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
156 &tsc_ref, sizeof(tsc_ref)))
157 return 1;
158 mark_page_dirty(kvm, gfn);
159 break;
160 }
Andrey Smetanine7d95132015-07-03 15:01:37 +0300161 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
162 return kvm_hv_msr_set_crash_data(vcpu,
163 msr - HV_X64_MSR_CRASH_P0,
164 data);
165 case HV_X64_MSR_CRASH_CTL:
166 return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
Andrey Smetanine516ceb2015-09-16 12:29:48 +0300167 case HV_X64_MSR_RESET:
168 if (data == 1) {
169 vcpu_debug(vcpu, "hyper-v reset requested\n");
170 kvm_make_request(KVM_REQ_HV_RESET, vcpu);
171 }
172 break;
Andrey Smetanine83d5882015-07-03 15:01:34 +0300173 default:
174 vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
175 msr, data);
176 return 1;
177 }
178 return 0;
179}
180
181static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
182{
183 struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
184
185 switch (msr) {
186 case HV_X64_MSR_APIC_ASSIST_PAGE: {
187 u64 gfn;
188 unsigned long addr;
189
190 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
191 hv->hv_vapic = data;
192 if (kvm_lapic_enable_pv_eoi(vcpu, 0))
193 return 1;
194 break;
195 }
196 gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
197 addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
198 if (kvm_is_error_hva(addr))
199 return 1;
200 if (__clear_user((void __user *)addr, PAGE_SIZE))
201 return 1;
202 hv->hv_vapic = data;
203 kvm_vcpu_mark_page_dirty(vcpu, gfn);
204 if (kvm_lapic_enable_pv_eoi(vcpu,
205 gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
206 return 1;
207 break;
208 }
209 case HV_X64_MSR_EOI:
210 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
211 case HV_X64_MSR_ICR:
212 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
213 case HV_X64_MSR_TPR:
214 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
215 default:
216 vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
217 msr, data);
218 return 1;
219 }
220
221 return 0;
222}
223
224static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
225{
226 u64 data = 0;
227 struct kvm *kvm = vcpu->kvm;
228 struct kvm_hv *hv = &kvm->arch.hyperv;
229
230 switch (msr) {
231 case HV_X64_MSR_GUEST_OS_ID:
232 data = hv->hv_guest_os_id;
233 break;
234 case HV_X64_MSR_HYPERCALL:
235 data = hv->hv_hypercall;
236 break;
237 case HV_X64_MSR_TIME_REF_COUNT: {
238 data =
239 div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
240 break;
241 }
242 case HV_X64_MSR_REFERENCE_TSC:
243 data = hv->hv_tsc_page;
244 break;
Andrey Smetanine7d95132015-07-03 15:01:37 +0300245 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
246 return kvm_hv_msr_get_crash_data(vcpu,
247 msr - HV_X64_MSR_CRASH_P0,
248 pdata);
249 case HV_X64_MSR_CRASH_CTL:
250 return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
Andrey Smetanine516ceb2015-09-16 12:29:48 +0300251 case HV_X64_MSR_RESET:
252 data = 0;
253 break;
Andrey Smetanine83d5882015-07-03 15:01:34 +0300254 default:
255 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
256 return 1;
257 }
258
259 *pdata = data;
260 return 0;
261}
262
263static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
264{
265 u64 data = 0;
266 struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
267
268 switch (msr) {
269 case HV_X64_MSR_VP_INDEX: {
270 int r;
271 struct kvm_vcpu *v;
272
273 kvm_for_each_vcpu(r, v, vcpu->kvm) {
274 if (v == vcpu) {
275 data = r;
276 break;
277 }
278 }
279 break;
280 }
281 case HV_X64_MSR_EOI:
282 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
283 case HV_X64_MSR_ICR:
284 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
285 case HV_X64_MSR_TPR:
286 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
287 case HV_X64_MSR_APIC_ASSIST_PAGE:
288 data = hv->hv_vapic;
289 break;
290 default:
291 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
292 return 1;
293 }
294 *pdata = data;
295 return 0;
296}
297
Andrey Smetanine7d95132015-07-03 15:01:37 +0300298int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
Andrey Smetanine83d5882015-07-03 15:01:34 +0300299{
300 if (kvm_hv_msr_partition_wide(msr)) {
301 int r;
302
303 mutex_lock(&vcpu->kvm->lock);
Andrey Smetanine7d95132015-07-03 15:01:37 +0300304 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
Andrey Smetanine83d5882015-07-03 15:01:34 +0300305 mutex_unlock(&vcpu->kvm->lock);
306 return r;
307 } else
308 return kvm_hv_set_msr(vcpu, msr, data);
309}
310
311int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
312{
313 if (kvm_hv_msr_partition_wide(msr)) {
314 int r;
315
316 mutex_lock(&vcpu->kvm->lock);
317 r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
318 mutex_unlock(&vcpu->kvm->lock);
319 return r;
320 } else
321 return kvm_hv_get_msr(vcpu, msr, pdata);
322}
323
324bool kvm_hv_hypercall_enabled(struct kvm *kvm)
325{
326 return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
327}
328
329int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
330{
331 u64 param, ingpa, outgpa, ret;
332 uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
333 bool fast, longmode;
334
335 /*
336 * hypercall generates UD from non zero cpl and real mode
337 * per HYPER-V spec
338 */
339 if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
340 kvm_queue_exception(vcpu, UD_VECTOR);
341 return 0;
342 }
343
344 longmode = is_64_bit_mode(vcpu);
345
346 if (!longmode) {
347 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
348 (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
349 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
350 (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
351 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
352 (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
353 }
354#ifdef CONFIG_X86_64
355 else {
356 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
357 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
358 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
359 }
360#endif
361
362 code = param & 0xffff;
363 fast = (param >> 16) & 0x1;
364 rep_cnt = (param >> 32) & 0xfff;
365 rep_idx = (param >> 48) & 0xfff;
366
367 trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
368
369 switch (code) {
370 case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
371 kvm_vcpu_on_spin(vcpu);
372 break;
373 default:
374 res = HV_STATUS_INVALID_HYPERCALL_CODE;
375 break;
376 }
377
378 ret = res | (((u64)rep_done & 0xfff) << 32);
379 if (longmode) {
380 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
381 } else {
382 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
383 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
384 }
385
386 return 1;
387}