// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>
#include <asm/tm.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
#ifdef CONFIG_PPC_BOOK3S_64
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
#endif

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M   _PAGE_COHERENT
#endif

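/*
 * "Split real mode" is the state where the guest has MSR_DR set but
 * MSR_IR clear: data accesses are translated while instruction fetches
 * are real-mode.  The helpers below appear to emulate this by moving
 * the guest PC into the SPLIT_HACK_OFFS window; the instruction-fetch
 * fault path (kvmppc_handle_pagefault()) strips the offset again to
 * recover the real address.
 */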
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* Do nothing unless we are in DR-only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* Do nothing if we have already fixed up the guest */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* The code must be in the fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		ulong lr = kvmppc_get_lr(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}

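/*
 * Deliver an interrupt to the guest the way the architecture would:
 * SRR0 gets the old PC, SRR1 gets the old MSR bits plus the
 * exception-specific flags, and execution resumes at the vector
 * offset from HIOR with the MSR taken from vcpu->arch.intr_msr.
 */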
static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	kvmppc_unfixup_split_real(vcpu);

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = to_book3s(vcpu)->hior + vec;

#ifdef CONFIG_PPC_BOOK3S_64
	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;
#endif

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	kvmppc_set_msr(vcpu, new_msr);
}

static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu->in_use = 0;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);

	kvmppc_restore_tm_pr(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_save_tm_pr(vcpu);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
	svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
	svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
	svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
	svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
	svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
	svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
	svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
	svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
	svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
	svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
	svcpu->cr  = vcpu->arch.regs.ccr;
	svcpu->xer = vcpu->arch.regs.xer;
	svcpu->ctr = vcpu->arch.regs.ctr;
	svcpu->lr  = vcpu->arch.regs.link;
	svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;

	svcpu_put(svcpu);
}

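/*
 * Compute the MSR the real-mode shadow code actually runs the guest
 * with: only a whitelisted subset of guest MSR bits is kept, the bits
 * the host requires (ME/RI/IR/DR/PR/EE) are forced on, and any
 * external providers (FP/VEC/VSX) the guest currently owns pass
 * straight through.
 */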
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
		MSR_TM | MSR_TS_MASK;
#else
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest has reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * In guest privileged state we want all TM transactions to
	 * fail, so clear the MSR_TM bit in the shadow MSR; every
	 * tbegin. will then trap into the host.
	 */
	if (!(guest_msr & MSR_PR))
		smsr &= ~MSR_TM;
#endif
	vcpu->arch.shadow_msr = smsr;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ulong old_msr;
#endif

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
	vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
	vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
	vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
	vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
	vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
	vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
	vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
	vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
	vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
	vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
	vcpu->arch.regs.ccr  = svcpu->cr;
	vcpu->arch.regs.xer = svcpu->xer;
	vcpu->arch.regs.ctr = svcpu->ctr;
	vcpu->arch.regs.link  = svcpu->lr;
	vcpu->arch.regs.nip  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Unlike other MSR bits, the MSR[TS] bits can be changed by the
	 * guest without notifying the host: unprivileged instructions
	 * such as "tbegin"/"tend"/"tresume"/"tsuspend" modify them
	 * directly in a PR KVM guest.
	 *
	 * It is therefore necessary to sync them here to compute a
	 * correct shadow_msr.
	 *
	 * tbegin in a privileged guest currently always fails, so we
	 * only need to take care of problem-state guests.
	 */
	old_msr = kvmppc_get_msr(vcpu);
	if (unlikely((old_msr & MSR_PR) &&
		     (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
		     (old_msr & (MSR_TS_MASK)))) {
		old_msr &= ~(MSR_TS_MASK);
		old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
		kvmppc_set_msr_fast(vcpu, old_msr);
		kvmppc_recalc_shadow_msr(vcpu);
	}
#endif

	svcpu->in_use = false;

out:
	svcpu_put(svcpu);
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
	tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
	mtspr(SPRN_TEXASR, vcpu->arch.texasr);
	mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	tm_disable();
}

/* Load up the math extensions that are enabled in the guest MSR but
 * not yet enabled in hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
	ulong exit_nr;
	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
		(MSR_FP | MSR_VEC | MSR_VSX);

	if (!ext_diff)
		return;

	if (ext_diff == MSR_FP)
		exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
	else if (ext_diff == MSR_VEC)
		exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
	else
		exit_nr = BOOK3S_INTERRUPT_VSX;

	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

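/*
 * Save/restore the guest's transactional-memory state around host
 * context switches.  When no transaction is active only the TM SPRs
 * need saving; an active or suspended transaction additionally
 * requires giving up TAR and the FP/vector state and calling the
 * real-mode TM helpers with preemption disabled.
 */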
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
		kvmppc_save_tm_sprs(vcpu);
		return;
	}

	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	preempt_disable();
	_kvmppc_save_tm_pr(vcpu, mfmsr());
	preempt_enable();
}

void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
		kvmppc_restore_tm_sprs(vcpu);
		if (kvmppc_get_msr(vcpu) & MSR_TM) {
			kvmppc_handle_lost_math_exts(vcpu);
			if (vcpu->arch.fscr & FSCR_TAR)
				kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		}
		return;
	}

	preempt_disable();
	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
	preempt_enable();

	if (kvmppc_get_msr(vcpu) & MSR_TM) {
		kvmppc_handle_lost_math_exts(vcpu);
		if (vcpu->arch.fscr & FSCR_TAR)
			kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
	}
}
#endif

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
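/*
 * The MMU notifier callbacks below keep the shadow page tables in
 * sync with the host: whenever the host unmaps or changes a range of
 * guest memory, the corresponding shadow PTEs are flushed so that the
 * next guest access refaults and remaps the page.
 */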
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

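/*
 * Writing the guest MSR has side effects beyond updating the
 * register: it may wake the vcpu from a nap (MSR_POW), switch the
 * segment setup when IR/DR/PR change, remap or flush the magic page,
 * and reload any math extensions the new MSR enables.
 */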
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr;

	/* For PAPR guest, make sure MSR reflects guest mode */
	if (vcpu->arch.papr_enabled)
		msr = (msr & ~MSR_HV) | MSR_ME;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* We should never let the guest MSR reach TS=10 (transactional)
	 * with PR=0, since we always fail transactions in guest
	 * privileged state.
	 */
	if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
		kvmppc_emulate_tabort(vcpu,
			TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

	old_msr = kvmppc_get_msr(vcpu);
	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around that we need to flush. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note:
	 * We assume that such a transition only happens while in kernel
	 * mode, ie, we never transition from user 32-bit to kernel
	 * 64-bit with a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (kvmppc_get_msr(vcpu) & MSR_TM)
		kvmppc_handle_lost_math_exts(vcpu);
#endif
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
	case PVR_POWER9:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have 32-byte cache lines, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
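/*
 * The loop below clears one opcode bit of every dcbz in the page
 * (INS_DCBZ & 0xfffffff7), turning it into a reserved instruction
 * that traps as a program interrupt; kvmppc_exit_pr_progint()
 * recognizes the mangled encoding and emulates a 32-byte dcbz.
 */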
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

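/*
 * Handle a guest page fault: translate the effective address through
 * the guest MMU (or treat it as 1:1 in real mode), then either inject
 * the fault into the guest, map the page on the host, or forward the
 * access to userspace as MMIO.
 */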
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
		pte.wimg = HPTE_R_M;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		/* fall through */
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT || page_found == -EPERM) {
		/* Page not found in guest PTE entries, or protection fault */
		u64 flags;

		if (page_found == -EPERM)
			flags = DSISR_PROTFAULT;
		else
			flags = DSISR_NOHPTE;
		if (data) {
			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
		} else {
			kvmppc_core_queue_inst_storage(vcpu, flags);
		}
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request */
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu->run, vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Since we disabled MSR_TM in privileged state, an mfspr of a
	 * TM SPR can trigger a TM facility-unavailable interrupt. In
	 * that case the emulation is handled by kvmppc_emulate_fac(),
	 * which eventually invokes kvmppc_emulate_mfspr(). But note that
	 * the mfspr can name any NV register as RT, so we need to
	 * return via RESUME_GUEST_NV so those NV registers are reloaded
	 * to reflect the update.
	 */
	if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
		return RESUME_GUEST_NV;
#endif

	return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	} else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
		vcpu->arch.fscr = fscr;
		kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		return;
	}

	vcpu->arch.fscr = fscr;
}
#endif

static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr | MSR_SE);
	}
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
	}
}

static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int exit_nr)
{
	enum emulation_result er;
	ulong flags;
	u32 last_inst;
	int emul, r;

	/*
	 * shadow_srr1 only contains valid flags if we came here via a program
	 * exception. The other exceptions (emulation assist, FP unavailable,
	 * etc.) do not provide flags in SRR1, so use an illegal-instruction
	 * exception when injecting a program interrupt into the guest.
	 */
	if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
	else
		flags = SRR1_PROGILL;

	emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
	if (emul != EMULATE_DONE)
		return RESUME_GUEST;

	if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
			kvmppc_get_pc(vcpu), last_inst);
#endif
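		/*
		 * In problem state, the only trap we emulate is a dcbz
		 * that kvmppc_patch_dcbz() mangled (the encoding check
		 * below); everything else is reflected back to the
		 * guest as a program interrupt.
		 */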
		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
			kvmppc_core_queue_program(vcpu, flags);
			return RESUME_GUEST;
		}
	}

	vcpu->stat.emulated_inst_exits++;
	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		pr_crit("%s: emulation at %lx failed (%08x)\n",
			__func__, kvmppc_get_pc(vcpu), last_inst);
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_EXIT_USER:
		r = RESUME_HOST_NV;
		break;
	default:
		BUG();
	}

	return r;
}

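/*
 * Main exit dispatcher for PR KVM: called after every guest exit with
 * interrupts re-enabled, it either fixes the condition up in the host
 * (page faults, lazy FP/VMX/VSX loading, hypercalls) and resumes the
 * guest, or forwards the event to the guest or to userspace.
 */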
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

		if (kvmppc_is_split_real(vcpu))
			kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused when invalidating them, so
		 * treat the respective fault as a segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			kvmppc_core_queue_inst_storage(vcpu,
						shadow_srr1 & 0x58000000);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused when invalidating them, so
		 * treat the respective fault as a segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_SYSTEM_RESET:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
		break;
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		u32 last_sc;
		int emul;

		/* Get last sc for papr */
		if (vcpu->arch.papr_enabled) {
			/* The sc instruction points SRR0 to the next inst */
			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
			if (emul != EMULATE_DONE) {
				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
				r = RESUME_GUEST;
				break;
			}
		}

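		/*
		 * 0x44000022 is the encoding of "sc 1" (LEV = 1), the
		 * hypercall form of the system-call instruction, so a
		 * privileged "sc 1" from a PAPR guest is a hypercall.
		 */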
Alexander Grafa668f2b2011-08-08 17:26:24 +02001313 if (vcpu->arch.papr_enabled &&
Mihai Caraman51f04722014-07-23 19:06:21 +03001314 (last_sc == 0x44000022) &&
Alexander Graf5deb8e72014-04-24 13:46:24 +02001315 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
Alexander Grafa668f2b2011-08-08 17:26:24 +02001316 /* SC 1 papr hypercalls */
1317 ulong cmd = kvmppc_get_gpr(vcpu, 3);
1318 int i;
1319
Aneesh Kumar K.V2ba9f0d2013-10-07 22:17:59 +05301320#ifdef CONFIG_PPC_BOOK3S_64
Alexander Grafa668f2b2011-08-08 17:26:24 +02001321 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
1322 r = RESUME_GUEST;
1323 break;
1324 }
Andreas Schwab96f38d72011-11-08 07:17:39 +00001325#endif
Alexander Grafa668f2b2011-08-08 17:26:24 +02001326
1327 run->papr_hcall.nr = cmd;
1328 for (i = 0; i < 9; ++i) {
1329 ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
1330 run->papr_hcall.args[i] = gpr;
1331 }
1332 run->exit_reason = KVM_EXIT_PAPR_HCALL;
1333 vcpu->arch.hcall_needed = 1;
1334 r = RESUME_HOST;
1335 } else if (vcpu->arch.osi_enabled &&
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001336 (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
1337 (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
1338 /* MOL hypercalls */
1339 u64 *gprs = run->osi.gprs;
1340 int i;
1341
1342 run->exit_reason = KVM_EXIT_OSI;
1343 for (i = 0; i < 32; i++)
1344 gprs[i] = kvmppc_get_gpr(vcpu, i);
1345 vcpu->arch.osi_needed = 1;
1346 r = RESUME_HOST_NV;
Alexander Graf5deb8e72014-04-24 13:46:24 +02001347 } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001348 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1349 /* KVM PV hypercalls */
1350 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1351 r = RESUME_GUEST;
1352 } else {
1353 /* Guest syscalls */
1354 vcpu->stat.syscall_exits++;
1355 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1356 r = RESUME_GUEST;
1357 }
1358 break;
Mihai Caraman51f04722014-07-23 19:06:21 +03001359 }
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
		u32 last_inst;

		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
			/* Do paired single instruction emulation */
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
				r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
			else
				r = RESUME_GUEST;

			break;
		}

		/* Enable external provider */
		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL:
			ext_msr = MSR_FP;
			break;

		case BOOK3S_INTERRUPT_ALTIVEC:
			ext_msr = MSR_VEC;
			break;

		case BOOK3S_INTERRUPT_VSX:
			ext_msr = MSR_VSX;
			break;
		}

		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
	{
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_TRACE:
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			run->exit_reason = KVM_EXIT_DEBUG;
			r = RESUME_HOST;
		} else {
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/*
		 * To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason.
		 */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

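/*
 * Fill @sregs for the KVM_GET_SREGS ioctl: PVR and SDR1 always, plus
 * either the 64-entry shadow SLB (on SLB-based CPUs) or the sixteen
 * segment registers and eight IBAT/DBAT pairs (book3s_32 style MMUs).
 */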
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

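/*
 * Inverse of the above for KVM_SET_SREGS: load PVR, SDR1 and the
 * segment state back into the vcpu, then flush the shadow MMU since
 * any translations it has cached may no longer be valid.
 */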
static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
#ifdef CONFIG_PPC_BOOK3S_64
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		/* Flush all SLB entries */
		vcpu->arch.mmu.slbmte(vcpu, 0, 0);
		vcpu->arch.mmu.slbia(vcpu);

		for (i = 0; i < 64; i++) {
			u64 rb = sregs->u.s.ppc64.slb[i].slbe;
			u64 rs = sregs->u.s.ppc64.slb[i].slbv;

			if (rb & SLB_ESID_V)
				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
		}
	} else
#endif
	{
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

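/*
 * Read one PR-specific register for the KVM_GET_ONE_REG ioctl.
 * A minimal userspace sketch (hypothetical vcpu_fd, error handling
 * omitted):
 *
 *	__u64 hior;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_HIOR,
 *		.addr = (__u64)(unsigned long)&hior,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */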
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, to_book3s(vcpu)->vtb);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		*val = get_reg_val(id, vcpu->arch.tfhar);
		break;
	case KVM_REG_PPC_TFIAR:
		*val = get_reg_val(id, vcpu->arch.tfiar);
		break;
	case KVM_REG_PPC_TEXASR:
		*val = get_reg_val(id, vcpu->arch.texasr);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		*val = get_reg_val(id,
				   vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int i, j;

		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				val->vval = vcpu->arch.vr_tm.vr[i-32];
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		*val = get_reg_val(id, vcpu->arch.cr_tm);
		break;
	case KVM_REG_PPC_TM_XER:
		*val = get_reg_val(id, vcpu->arch.xer_tm);
		break;
	case KVM_REG_PPC_TM_LR:
		*val = get_reg_val(id, vcpu->arch.lr_tm);
		break;
	case KVM_REG_PPC_TM_CTR:
		*val = get_reg_val(id, vcpu->arch.ctr_tm);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
		break;
	case KVM_REG_PPC_TM_AMR:
		*val = get_reg_val(id, vcpu->arch.amr_tm);
		break;
	case KVM_REG_PPC_TM_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr_tm);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr_tm);
		break;
	case KVM_REG_PPC_TM_TAR:
		*val = get_reg_val(id, vcpu->arch.tar_tm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

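/*
 * Only the ILE bit of the guest LPCR is tracked; it selects the
 * endianness (MSR_LE) that interrupts are delivered with.
 */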
static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}

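/* Write one PR-specific register for the KVM_SET_ONE_REG ioctl. */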
static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_VTB:
		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		vcpu->arch.tfhar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TFIAR:
		vcpu->arch.tfiar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TEXASR:
		vcpu->arch.texasr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
			set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int i, j;

		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
		else
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				vcpu->arch.vr_tm.vr[i-32] = val->vval;
			else
				r = -ENXIO;
		break;
	}
	case KVM_REG_PPC_TM_CR:
		vcpu->arch.cr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_XER:
		vcpu->arch.xer_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_LR:
		vcpu->arch.lr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_CTR:
		vcpu->arch.ctr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_AMR:
		vcpu->arch.amr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_PPR:
		vcpu->arch.ppr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			/* Mirror the getter: the checkpointed VSCR lives in vr_tm */
			vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		vcpu->arch.dscr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_TAR:
		vcpu->arch.tar_tm = set_reg_val(id, *val);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

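/*
 * Allocate and initialise the PR-specific parts of a vcpu: the book3s
 * struct, the shadow vcpu (32-bit hosts only), the page shared with
 * the guest, a default PVR, and the shadow MMU state.
 */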
static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	unsigned long p;
	int err;

	err = -ENOMEM;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto free_shadow_vcpu;
	vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
	vcpu->arch.intr_msr = 0;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto free_shared_page;

	return 0;

free_shared_page:
	free_page((unsigned long)vcpu->arch.shared);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
out:
	return err;
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	kvmppc_mmu_destroy_pr(vcpu);
	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
}

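/*
 * Main entry point for running a vcpu: save the host's FP/Altivec/VSX
 * state, preload the FPU if the guest was using it, enter the guest
 * via __kvmppc_vcpu_run(), then pull the guest's FP/vector and
 * TAR/EBB/DSCR state back out before returning.
 */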
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_setup_debug(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU, Altivec and VSX state */
	giveup_all(current);

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvmppc_clear_debug(vcpu);

	/*
	 * No need for guest_exit. It's done in handle_exit.
	 * We also get here with interrupts enabled.
	 */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

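/*
 * PR KVM builds guest mappings on demand through the shadow MMU and
 * keeps no per-memslot state, so the memslot hooks below are no-ops.
 */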
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

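/*
 * Report the host MMU geometry for the KVM_PPC_GET_SMMU_INFO ioctl:
 * SLB size and the segment page-size encodings PR guests may use.
 */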
#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test. Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}

static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -ENODEV;
	/* Require flags and process table base and size to all be zero. */
	if (cfg->flags || cfg->process_table)
		return -EINVAL;
	return 0;
}

#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
	return 0;
}
#endif /* CONFIG_PPC64 */

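/*
 * PR KVM needs guest interrupts to come through the normal (real mode)
 * exception vectors, so relocation-on interrupts are disabled on
 * pseries while any PR guest exists; the count below tracks this
 * host-wide across VMs.
 */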
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pseries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pseries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/*
	 * PR KVM can work on POWER9 inside a guest partition
	 * running in HPT mode. It can't work if we are using
	 * radix translation (because radix provides no way for
	 * a process to have unique translations in quadrant 3).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
		return -EIO;
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

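/*
 * Ops table wiring the PR implementations into the generic PPC KVM
 * core; registered as kvmppc_pr_ops by kvmppc_book3s_init_pr().
 */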
static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load = kvmppc_core_vcpu_load_pr,
	.vcpu_put = kvmppc_core_vcpu_put_pr,
	.inject_interrupt = kvmppc_inject_interrupt_pr,
	.set_msr = kvmppc_set_msr_pr,
	.vcpu_run = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
	.hcall_implemented = kvmppc_hcall_impl_pr,
	.configure_mmu = kvm_configure_mmu_pr,
#endif
	.giveup_ext = kvmppc_giveup_ext,
};

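/* Module init/exit: check CPU compatibility and (un)register the PR ops. */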
int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif