/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

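/*
 * A vcpu is considered runnable here if it has a pending exception to
 * deliver or an outstanding request bit set (see kvm_make_request()).
 */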
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

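/*
 * The shared (magic) page is kept in the endianness the guest last
 * registered it with.  If the guest re-registers it after switching
 * endianness, the cached copy is byte-swapped below (see the
 * MAP_MAGIC_PAGE hcall handling in kvmppc_kvm_pv()).
 */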
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

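/*
 * Handle a KVM/ePAPR hypercall from the guest: the hcall number arrives
 * in r11 and up to four parameters in r3..r6.  The secondary return value
 * is written to r4 here; the primary status is handed back to the caller.
 */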
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

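/* Check that the vcpu is configured in a way this host can actually run. */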
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

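/*
 * Guest store through the MMU: translate the effective address with
 * kvmppc_xlate(), honour the magic page override, and fall back to MMIO
 * emulation if the target is not backed by guest memory.  kvmppc_ld()
 * below is the load-side counterpart.
 */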
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
		    is_kvmppc_hv_enabled(kvm);
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

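/*
 * Complete an MMIO load: pick the value out of run->mmio.data, undo any
 * host/guest endianness difference, apply sign extension if requested and
 * write the result into the register recorded in vcpu->arch.io_gpr.
 */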
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

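/*
 * Common MMIO load setup: host_swabbed ends up as the logical XOR of
 * kvmppc_need_byteswap() and is_default_endian.  If the in-kernel I/O bus
 * cannot satisfy the read, EMULATE_DO_MMIO is returned so the access can
 * be completed in userspace.
 */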
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

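/*
 * ONE_REG accessors: the subarch kvmppc_get/set_one_reg() hooks are tried
 * first; registers they do not claim (the Altivec set below) are handled
 * here in the generic code.
 */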
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

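/* Simple bitmap-based allocator for LPID (logical partition ID) values. */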
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);