/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
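/*
 * Byte-swap every field of the guest/host shared page.  Used below when
 * the endianness the guest expects for the magic page changes, so that
 * both sides keep interpreting the same bytes the same way.
 */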
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

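/*
 * Handle a KVM/ePAPR paravirtual hypercall from the guest: the hcall
 * number arrives in r11 and up to four parameters in r3-r6.  The result
 * is handed back to the caller (which is expected to place it in r3),
 * and a second return value is written directly to r4 below.
 */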
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

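/*
 * Run the load/store emulator and translate its result into a resume
 * code: stay in the guest, bounce out to the host/userspace for MMIO,
 * or give up on the instruction entirely.
 */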
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

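/*
 * Store 'size' bytes from 'ptr' at guest effective address *eaddr.
 * The address is translated first; writes that hit the magic page are
 * satisfied from the shared page, and anything that cannot be written
 * via kvm_write_guest() is reported as EMULATE_DO_MMIO so the caller
 * can fall back to MMIO emulation.  kvmppc_ld() below is the read-side
 * counterpart.
 */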
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
		       cpu_has_feature(CPU_FTR_HVMODE));
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
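/*
 * Helpers for steering MMIO load results into VSX registers.  The
 * "offset" helpers map an element index onto the in-register layout of
 * a VSX doubleword/word, which differs between big- and little-endian
 * hosts; -1 means the index is out of range.
 */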
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
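/*
 * Equivalent helpers for Altivec (VMX) registers: compute the element
 * offset for the requested element size, reversing it when the guest
 * and host byte order differ (kvmppc_need_byteswap()), then patch the
 * element into the saved vector register.
 */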
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
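/*
 * Convert between single- and double-precision floating point by
 * bouncing the value through the FPU: load in one format, store in the
 * other.  fr0 is clobbered, so the FPU is enabled for kernel use and
 * preemption is disabled around the sequence.
 */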
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

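/*
 * Finish an MMIO load once run->mmio.data has been filled in (either by
 * the in-kernel I/O bus or by userspace): byte-swap if host and guest
 * endianness differ, widen single-precision values, apply any requested
 * sign extension, and route the result into the GPR, FPR, VSX or VMX
 * register recorded when the instruction was decoded.
 */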
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}

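/*
 * Common MMIO load path.  Describe the access in run->mmio and record
 * where the result should go; if the in-kernel I/O bus can satisfy the
 * read, the load is completed immediately, otherwise EMULATE_DO_MMIO is
 * returned and the caller exits to userspace to perform the access.
 */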
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

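/*
 * MMIO store path: mirror of the load path above, except that the value
 * is narrowed and byte-swapped into run->mmio.data up front, so there
 * is no separate completion step when the access finishes.
 */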
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
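/*
 * Pull the next element to be stored out of the source VSX register.
 * VSRs 0-31 overlay the floating-point registers, so low register numbers
 * read from the FPR array; VSRs 32-63 overlay the Altivec registers.
 * Returns -1 if the element offset is invalid or the copy type is not
 * recognised.
 */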
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

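/*
 * Store each outstanding VSX element to MMIO space.  The element value is
 * fetched with kvmppc_get_vsr_data() and pushed through the scalar store
 * path above, one MMIO transaction per element.
 */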
int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
			val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

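/*
 * Called from kvm_arch_vcpu_ioctl_run() once userspace has completed one
 * MMIO transaction of a multi-element VSX access: kick off the next
 * element, and either resume the guest or go back out to userspace for
 * the next MMIO exit.
 */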
static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
			struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
			run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
			vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
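/*
 * VMX (Altivec) MMIO is handled the same way as VSX above: the access is
 * broken into a short series of smaller MMIO transactions (at most two
 * per access, since a 128-bit VMX register holds no more than two
 * doublewords), with mmio_vmx_copy_nums and mmio_vmx_offset tracking how
 * far through the register we have got.
 */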
int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return result;
}

int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return result;
}

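/*
 * Store each outstanding VMX element.  The element width was recorded in
 * mmio_copy_type by the instruction emulation code; the matching helper
 * above extracts the element from the source vector register before it is
 * pushed through the scalar MMIO store path.
 */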
int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
			struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

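/*
 * ONE_REG accessors.  kvmppc_get_one_reg()/kvmppc_set_one_reg() handle
 * most register IDs; the Altivec registers are picked up here as a
 * fallback when those return -EINVAL.  Userspace drives this through the
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG vcpu ioctls, roughly as follows
 * (illustrative sketch only, not code from this file):
 *
 *	struct kvm_one_reg one = {
 *		.id   = KVM_REG_PPC_VRSAVE,
 *		.addr = (__u64)(unsigned long)&vrsave,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one);
 */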
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

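/*
 * Main KVM_RUN entry point.  Before re-entering the guest we finish off
 * whatever caused the previous exit to userspace: complete a pending MMIO
 * load (including the VSX/VMX multi-element cases, which may bounce back
 * out to userspace again), copy back OSI or PAPR hypercall results, or
 * set the EPR on BookE.  run->immediate_exit short-circuits straight back
 * to userspace with -EINTR.
 */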
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif
	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

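/*
 * Per-vcpu KVM_ENABLE_CAP handler.  Each case records that userspace has
 * opted into a feature (OSI, PAPR, watchdog, an in-kernel irqchip
 * connection, ...); kvmppc_sanity_check() then verifies that the
 * resulting configuration is one this host can actually run.  Userspace
 * enables a capability roughly like this (illustrative sketch only):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */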
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -ENXIO;
		if (!xive_enabled()) {
			fdput(f);
			break;
		}

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

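/*
 * Report the magic hypercall instruction sequence a paravirtualized guest
 * should use to get into KVM.  Userspace obtains it with the
 * KVM_PPC_GET_PVINFO vm ioctl, roughly as follows (illustrative sketch
 * only, not code from this file):
 *
 *	struct kvm_ppc_pvinfo pvinfo;
 *	if (!ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo))
 *		advertise pvinfo.hcall[0..3] to the guest;
 */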
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *	lis r0, KVM_SC_MAGIC_R0@h
	 *	ori r0, r0, KVM_SC_MAGIC_R0@l
	 *	sc
	 *	nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

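/*
 * VM-wide KVM_ENABLE_CAP handler (Book3S 64 only at present): selects
 * which hypercalls the guest may use, the SMT mode for guest cores, and
 * whether nested HV virtualization is allowed.
 */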
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

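/*
 * Look for the child node @name under @fw_features and report whether it
 * carries the boolean property @state ("enabled" or "disabled").
 */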
static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif

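/*
 * VM ioctls that are not handled generically.  On Book3S 64 anything not
 * recognised here is forwarded to the active backend (HV or PR) via
 * kvm->arch.kvm_ops->arch_vm_ioctl(); elsewhere it is rejected with
 * -ENOTTY.
 */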
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

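/*
 * Simple bitmap allocator for LPID values (the partition IDs used by the
 * hypervisor MMU).  kvmppc_init_lpid() caps the pool at what the platform
 * code reports; kvmppc_alloc_lpid() scans for a free bit and claims it
 * atomically with test_and_set_bit().
 */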
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);