/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

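/*
 * A vcpu is runnable as soon as it has a pending exception to deliver
 * or an outstanding request bit set; the generic halt loop uses this
 * to decide when a blocked vcpu must wake up.
 */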
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * enabled: they are hard-disabled here, stay disabled if we return 1
 * (ready to enter the guest), and are re-enabled on the way back to
 * the host.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * interrupts got enabled in between, so we
			 * are back at square 1
			 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

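/*
 * The shared (magic) page is kept in the guest's current endianness.
 * When the guest switches endianness, every field is byteswapped in
 * place so that the value each field holds stays the same.
 */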
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

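/*
 * Handle a paravirtual hypercall from the guest, using the ePAPR
 * convention: the hypercall number arrives in r11, up to four
 * arguments in r3-r6, the status code goes back in r3 (our return
 * value) and a second return value in r4.
 */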
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

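/*
 * Run the load/store emulator and map its verdict onto a RESUME_*
 * action: keep executing the guest, exit to userspace to complete an
 * MMIO access, or bail out to the host on emulation failure.
 */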
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

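/*
 * kvmppc_st()/kvmppc_ld() access guest memory through a guest
 * effective address: translate the EA, honour the protection bits,
 * short-circuit accesses that hit the magic page, and report
 * EMULATE_DO_MMIO for addresses with no memory behind them.
 */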
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

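/*
 * VM creation: pick the HV or PR backend for this VM. Type 0 means
 * "no preference", in which case HV wins if its module is loaded;
 * an explicit KVM_VM_PPC_HV or KVM_VM_PPC_PR fails unless that
 * backend is actually available.
 */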
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

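/*
 * hrtimer callback for the emulated decrementer: hand the expiry to
 * the core code, which queues a decrementer interrupt for the guest.
 */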
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

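/*
 * Complete an MMIO load once run->mmio.data holds the value: byteswap
 * if host and guest endianness differ, sign-extend if the emulated
 * instruction asked for it, then write the result into the GPR, FPR
 * and/or QPR the instruction targeted.
 */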
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

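/*
 * Set up an MMIO load: record where the result must land, try the
 * in-kernel I/O bus first, and only return EMULATE_DO_MMIO (forcing
 * an exit to userspace) when no in-kernel device claims the access.
 */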
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

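/*
 * ONE_REG accessors. The subarch's kvmppc_get/set_one_reg() gets
 * first shot at the register ID; the Altivec registers shared by all
 * subarches are only handled here when it returns -EINVAL.
 */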
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

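/*
 * Per-vcpu KVM_ENABLE_CAP: switch on optional features (OSI, PAPR,
 * EPR, watchdog, software-managed TLB, in-kernel MPIC/XICS wiring)
 * and then re-run the sanity check against the new configuration.
 */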
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *	lis r0, KVM_SC_MAGIC_R0@h
	 *	ori r0, r0, KVM_SC_MAGIC_R0@l
	 *	sc
	 *	nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

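/*
 * Simple bitmap allocator for logical partition IDs. Each guest needs
 * its own LPID to tag its translations in the hardware MMU, so LPIDs
 * are handed out once and reclaimed when the guest goes away.
 */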
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);