// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/kvm_host.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled (the function itself hard-disables interrupts).
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
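
/*
 * Illustrative caller pattern (editorial sketch, not code from this file):
 * the subarch run loops use kvmppc_prepare_to_enter() roughly as follows,
 * retrying until it either commits to guest entry or bails to the host.
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;	// back to the host, e.g. -EINTR on a signal
 *	// interrupts are hard-disabled here; proceed to enter the guest
 */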

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
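
/*
 * Editorial note: kvmppc_swab_shared() is only used by the magic-page
 * hypercall below.  The shared page is kept in the guest's current
 * endianness, so when a guest that has switched endianness (MSR_LE in
 * intr_msr) remaps the magic page, every field is byte-swapped once so
 * that host-side accesses through the struct keep reading correct values.
 */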

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
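
/*
 * Guest-side ABI sketch (editorial, inferred from the register usage above;
 * not code from this file): the paravirt hypercall number arrives in r11
 * and up to four parameters in r3-r6; this function's return value is the
 * primary status (placed back in r3 by the caller) and a second return
 * value goes out in r4.
 *
 *	// hypothetical guest stub; the trap mechanism varies by platform
 *	r11 = KVM_HCALL_TOKEN(KVM_HC_FEATURES);
 *	hypercall();		// traps into kvmppc_kvm_pv()
 *	status   = r3;		// EV_SUCCESS / EV_UNIMPLEMENTED
 *	features = r4;		// e.g. the KVM_FEATURE_MAGIC_PAGE bit
 */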

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

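/*
 * Editorial note: kvmppc_st() and its twin kvmppc_ld() below implement
 * guest-effective-address stores/loads for the emulator.  Both first offer
 * the access to an optional subarch hook (store_to_eaddr/load_from_eaddr),
 * then fall back to translating the effective address with kvmppc_xlate()
 * and, unless the access hits the in-kernel magic page, forward it to
 * guest memory via kvm_write_guest()/kvm_read_guest().
 */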
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}
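
/*
 * Userspace selection sketch (editorial, not code from this file): the
 * `type` argument above comes straight from the KVM_CREATE_VM ioctl, so a
 * VMM can force PR or HV mode explicitly, e.g.:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);
 *
 * Passing 0 picks HV when the HV module is loaded, otherwise PR.
 */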

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
		/* fall through */
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
		       cpu_has_feature(CPU_FTR_HVMODE));
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}
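
/*
 * Query sketch (editorial, not code from this file): userspace probes these
 * capabilities through the generic KVM_CHECK_EXTENSION ioctl, either on the
 * /dev/kvm fd (kvm == NULL above, so HV is guessed) or, where supported, on
 * a VM fd for an exact answer:
 *
 *	int smt = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *	// smt is the value of r computed above, e.g. the emulated SMT mode
 */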

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	vcpu->arch.wqp = &vcpu->wq;
	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
	return 0;

out_vcpu_uninit:
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

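/*
 * Worked example (editorial): a 128-bit VSR holds two 64-bit doublewords
 * (indices 0-1) or four 32-bit words (indices 0-3).  The helpers above
 * mirror the instruction-level element index into the in-memory layout:
 * on a big-endian host offset == index, while on little-endian the order
 * is reversed, so dword index 0 lives at offset 1 and word index 0 at
 * offset 3.
 */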
static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */
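
/*
 * Editorial note: sp_to_dp()/dp_to_sp() deliberately bounce the value
 * through FPR0 with lfs/stfd (and lfd/stfs) rather than converting in
 * integer code, so the hardware performs the architecturally defined
 * single/double precision conversion; enable_kernel_fp() inside the
 * preempt-disabled region keeps the scratch FPR usage safe in kernel
 * context.
 */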

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}

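/*
 * Editorial note: __kvmppc_handle_load() below only *prepares* an MMIO
 * load.  If the in-kernel io bus cannot satisfy it, the vcpu exits to
 * userspace with KVM_EXIT_MMIO; the target register (io_gpr) plus the
 * sign-extend and byteswap flags are stashed in vcpu->arch so that
 * kvmppc_complete_mmio_load() can commit the value on the next KVM_RUN.
 */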
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */
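
/*
 * Editorial note: a single VSX load/store can touch up to a full 128-bit
 * register (or several elements, for the dump-style loads), so the VSX
 * handlers above and below split the access into mmio_vsx_copy_nums
 * element-sized MMIO transactions, advancing paddr_accessed and
 * mmio_vsx_offset after each one; emulation resumes mid-sequence after a
 * userspace MMIO exit.
 */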
1306
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
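/*
 * Fetch one element of VSX register 'rs' for an MMIO store.  The
 * element is selected by mmio_vsx_offset and mmio_copy_type; VSRs
 * 0-31 alias the FP registers, VSRs 32-63 alias the Altivec ones.
 * Returns -1 if the offset or copy type is invalid.
 */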
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

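/*
 * Repeat kvmppc_handle_store() for each element of a VSX register,
 * pulling successive elements out of the register file with
 * kvmppc_get_vsr_data() as the copy counters advance.
 */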
int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
			val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

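/*
 * Resume a multi-element VSX MMIO access after userspace has completed
 * one element: either queue the next element (returning RESUME_HOST
 * with a fresh KVM_EXIT_MMIO) or go back into the guest once the
 * repeat is finished.
 */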
static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
			struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
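/*
 * Repeat __kvmppc_handle_load() for each element of an Altivec (VMX)
 * register.  A VMX register is 128 bits wide, so at most two
 * doubleword copies can be in flight for one instruction.
 */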
int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
			is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

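/*
 * The accessors below pull a single doubleword, word, halfword or
 * byte out of VMX register 'index' for an MMIO store, selected by
 * mmio_vmx_offset.  Each returns -1 if the offset is out of range.
 */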
int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return result;
}

int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return result;
}

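/*
 * Repeat kvmppc_handle_store() for each element of a VMX register.
 * mmio_copy_type picks the element width; the register number is
 * carried in the low bits of 'rs' (KVM_MMIO_REG_MASK).
 */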
int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;

			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
		struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

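/*
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG handlers.  Registers the back end
 * does not know about (kvmppc_get/set_one_reg() returns -EINVAL) are
 * handled generically here; currently that covers the Altivec state.
 */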
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

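/*
 * Main dispatch for KVM_RUN.  On re-entry after an exit to userspace,
 * first complete whatever is pending (an MMIO load result, a VSX/VMX
 * repeat, OSI or PAPR hypercall return values, or an EPR update),
 * then enter the guest unless userspace asked for an immediate exit.
 */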
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif
	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

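/*
 * Per-vcpu KVM_ENABLE_CAP.  Most capabilities just flip a flag in the
 * vcpu; the MPIC/XICS/XIVE cases connect the vcpu to an in-kernel
 * interrupt controller device passed in as a file descriptor.
 */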
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -ENXIO;
		if (!xive_enabled()) {
			/* Drop the fd reference before bailing out */
			fdput(f);
			break;
		}

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		/* Copy before vcpu_load() so an -EFAULT exit stays balanced */
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		/* Copy before vcpu_load() so an -EFAULT exit stays balanced */
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

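/*
 * KVM_PPC_GET_PVINFO: tell userspace which instruction sequence a
 * guest should use for paravirtualized hypercalls, as big-endian
 * instruction words, plus feature flags (currently only EV_IDLE).
 */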
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

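/*
 * VM-wide KVM_ENABLE_CAP.  On Book3S 64 this selects which hypercalls
 * are enabled for in-kernel handling, sets the virtual SMT mode, and
 * turns on nested HV virtualization where the back end supports it.
 */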
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds. The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_SVM_OFF: {
		struct kvm *kvm = filp->private_data;

		r = 0;
		if (!kvm->arch.kvm_ops->svm_off)
			goto out;

		r = kvm->arch.kvm_ops->svm_off(kvm);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

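/*
 * Simple bitmap allocator for logical partition IDs (LPIDs).  The
 * platform code reports how many LPIDs the hardware supports via
 * kvmppc_init_lpid(); each VM then takes one with kvmppc_alloc_lpid()
 * and returns it with kvmppc_free_lpid().
 */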
long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);