/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

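/*
 * The shared (magic) page is kept in the guest's endianness.  When the
 * guest switches endianness, every field has to be byte-swapped so that
 * both sides keep seeing consistent values.
 */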
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

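/*
 * Handle a KVM/ePAPR paravirtual hypercall from the guest.  The hypercall
 * number arrives in GPR 11 and up to four parameters in GPRs 3-6; the
 * secondary return value is written into GPR 4 here, while the primary
 * status code is handed back to the caller.
 */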
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

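/*
 * kvmppc_st()/kvmppc_ld() translate a guest effective address and then
 * store to / load from guest memory, short-circuiting accesses that hit
 * the magic (shared) page.  They return EMULATE_DO_MMIO when the address
 * is not backed by guest RAM.
 */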
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else	if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

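/*
 * Report which KVM capabilities are available.  Several answers depend on
 * whether the VM (or, before a VM exists, the loaded module) provides the
 * HV or the PR flavour of KVM.
 */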
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
		    is_kvmppc_hv_enabled(kvm);
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

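/*
 * Write the data returned by a completed MMIO load back into the target
 * register, applying the byte swap and sign extension that were recorded
 * when the load was emulated.
 */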
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

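/*
 * Set up an MMIO load: record where the result has to go, try the
 * in-kernel MMIO bus first, and only exit to userspace (EMULATE_DO_MMIO)
 * if no in-kernel device claims the access.
 */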
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

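/*
 * Simple bitmap-based allocator for logical partition IDs (LPIDs),
 * shared by the subarchitecture implementations.
 */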
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);