/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>
#include <asm/tm.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M _PAGE_COHERENT
#endif

static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
        ulong msr = kvmppc_get_msr(vcpu);
        return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
        ulong msr = kvmppc_get_msr(vcpu);
        ulong pc = kvmppc_get_pc(vcpu);

        /* Only act if we are in DR-only split real mode */
        if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
                return;

        /* Nothing to do if we have already fixed up the guest */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
                return;

        /* The hack only works for code in the fixupable address space */
        if (pc & SPLIT_HACK_MASK)
                return;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
        kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);

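/*
 * vcpu_load hook for PR KVM: stage the shadow SLB for the real-mode
 * entry code, disable AIL while the guest runs, and restore the
 * guest's TM state.
 */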
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu->in_use = 0;
        svcpu_put(svcpu);
#endif

        /* Disable AIL if supported */
        if (cpu_has_feature(CPU_FTR_HVMODE) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

        if (kvmppc_is_split_real(vcpu))
                kvmppc_fixup_split_real(vcpu);

        kvmppc_restore_tm_pr(vcpu);
}

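/*
 * vcpu_put hook for PR KVM: sync register state back from the shadow
 * vcpu, give up the external providers and TAR, save TM state, and
 * re-enable AIL.
 */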
static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        if (svcpu->in_use) {
                kvmppc_copy_from_svcpu(vcpu);
        }
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        if (kvmppc_is_split_real(vcpu))
                kvmppc_unfixup_split_real(vcpu);

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        kvmppc_save_tm_pr(vcpu);

        /* Enable AIL if supported */
        if (cpu_has_feature(CPU_FTR_HVMODE) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

        vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

        svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
        svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
        svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
        svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
        svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
        svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
        svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
        svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
        svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
        svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
        svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
        svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
        svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
        svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
        svcpu->cr  = vcpu->arch.cr;
        svcpu->xer = vcpu->arch.regs.xer;
        svcpu->ctr = vcpu->arch.regs.ctr;
        svcpu->lr  = vcpu->arch.regs.link;
        svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
        svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
        /*
         * Now also save the current time base value. We use this
         * to find the guest purr and spurr value.
         */
        vcpu->arch.entry_tb = get_tb();
        vcpu->arch.entry_vtb = get_vtb();
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                vcpu->arch.entry_ic = mfspr(SPRN_IC);
        svcpu->in_use = true;

        svcpu_put(svcpu);
}

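/*
 * Recompute the MSR that the shadow (real-mode) code runs with: start
 * from the guest MSR, keep only the bits the guest may control, and
 * force on the bits the host needs (translation, problem state, etc.).
 */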
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong guest_msr = kvmppc_get_msr(vcpu);
        ulong smsr = guest_msr;

        /* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
                MSR_TM | MSR_TS_MASK;
#else
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External provider (FPU/Altivec/VSX) bits the guest has reserved */
        smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * In guest privileged state we want every TM transaction to
         * fail, so clear MSR_TM in the shadow MSR; each tbegin. will
         * then trap into the host.
         */
        if (!(guest_msr & MSR_PR))
                smsr &= ~MSR_TM;
#endif
        vcpu->arch.shadow_msr = smsr;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        ulong old_msr;
#endif

        /*
         * Maybe we were already preempted and synced the svcpu from
         * our preempt notifiers. Don't bother touching this svcpu then.
         */
        if (!svcpu->in_use)
                goto out;

        vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
        vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
        vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
        vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
        vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
        vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
        vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
        vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
        vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
        vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
        vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
        vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
        vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
        vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr = svcpu->cr;
        vcpu->arch.regs.xer = svcpu->xer;
        vcpu->arch.regs.ctr = svcpu->ctr;
        vcpu->arch.regs.link = svcpu->lr;
        vcpu->arch.regs.nip = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar   = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
        vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
        /*
         * Update purr and spurr using time base on exit.
         */
        vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
        vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
        to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * Unlike other MSR bits, the MSR[TS] bits can be changed by the
         * guest without notifying the host: they are modified by
         * unprivileged instructions such as "tbegin."/"tend."/
         * "tresume."/"tsuspend." in a PR KVM guest.
         *
         * It is necessary to sync here to calculate a correct shadow_msr.
         *
         * A privileged guest's tbegin. always fails at present, so we
         * only need to take care of problem-state guests.
         */
        old_msr = kvmppc_get_msr(vcpu);
        if (unlikely((old_msr & MSR_PR) &&
                     (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
                     (old_msr & (MSR_TS_MASK)))) {
                old_msr &= ~(MSR_TS_MASK);
                old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
                kvmppc_set_msr_fast(vcpu, old_msr);
                kvmppc_recalc_shadow_msr(vcpu);
        }
#endif

        svcpu->in_use = false;

out:
        svcpu_put(svcpu);
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
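/* Save/restore the guest's checkpointed TM SPRs (TFHAR, TEXASR, TFIAR). */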
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
        tm_enable();
        vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
        vcpu->arch.texasr = mfspr(SPRN_TEXASR);
        vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
        tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
        tm_enable();
        mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
        mtspr(SPRN_TEXASR, vcpu->arch.texasr);
        mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
        tm_disable();
}

/*
 * Load up the math extensions (FP/VEC/VSX) that are enabled in the
 * guest MSR but not yet enabled in hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
        ulong exit_nr;
        ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
                (MSR_FP | MSR_VEC | MSR_VSX);

        if (!ext_diff)
                return;

        if (ext_diff == MSR_FP)
                exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
        else if (ext_diff == MSR_VEC)
                exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
        else
                exit_nr = BOOK3S_INTERRUPT_VSX;

        kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

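/*
 * Save/restore guest TM state around entry and exit. If no transaction
 * is active, only the TM SPRs need to be moved; otherwise the full
 * checkpointed state is transferred, with preemption disabled.
 */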
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
        if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
                kvmppc_save_tm_sprs(vcpu);
                return;
        }

        kvmppc_giveup_ext(vcpu, MSR_VSX);

        preempt_disable();
        _kvmppc_save_tm_pr(vcpu, mfmsr());
        preempt_enable();
}

void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
        if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
                kvmppc_restore_tm_sprs(vcpu);
                if (kvmppc_get_msr(vcpu) & MSR_TM)
                        kvmppc_handle_lost_math_exts(vcpu);
                return;
        }

        preempt_disable();
        _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
        preempt_enable();

        if (kvmppc_get_msr(vcpu) & MSR_TM)
                kvmppc_handle_lost_math_exts(vcpu);
}
#endif

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}

/************* MMU Notifiers *************/
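/*
 * Flush the shadow page table entries for every guest page backed by
 * the HVA range [start, end) in any memslot, on all vcpus.
 */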
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
                             unsigned long end)
{
        long i;
        struct kvm_vcpu *vcpu;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
                                              gfn_end << PAGE_SHIFT);
        }
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
                                  unsigned long end)
{
        do_kvm_unmap_hva(kvm, start, end);

        return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
                          unsigned long end)
{
        /* XXX could be more clever ;) */
        return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

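/*
 * Update the guest MSR. This also handles sleeping and waking on
 * MSR_POW, remaps segments when the translation mode changes, and
 * preloads the FPU when the guest enables it.
 */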
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr;

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * We should never target a guest MSR with TS=10 && PR=0,
         * since we always fail transactions in guest privileged
         * state.
         */
        if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
                kvmppc_emulate_tabort(vcpu,
                        TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

        old_msr = kvmppc_get_msr(vcpu);
        msr &= to_book3s(vcpu)->msr_mask;
        kvmppc_set_msr_fast(vcpu, msr);
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        kvmppc_set_msr_fast(vcpu, msr);
                }
        }

        if (kvmppc_is_split_real(vcpu))
                kvmppc_fixup_split_real(vcpu);
        else
                kvmppc_unfixup_split_real(vcpu);

        if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
            (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around that we need to flush. Typically the 32-bit
         * magic page will be instantiated when calling into RTAS. Note: We
         * assume that such a transition only happens while in kernel mode,
         * i.e., we never transition from user 32-bit to kernel 64-bit with
         * a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (kvmppc_get_msr(vcpu) & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (kvmppc_get_msr(vcpu) & MSR_TM)
                kvmppc_handle_lost_math_exts(vcpu);
#endif
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are in hypervisor level on 970, we can tell the CPU to
         * treat DCBZ as a 32-byte store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set. So let's hope nobody
           really needs them in a VM on Cell and force disable them. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
        case PVR_POWER8E:
        case PVR_POWER8NVL:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32 bit Book3S always has 32 byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}

/* Book3s_32 CPUs always have 32-byte cache lines, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
        for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
                        page[i] &= cpu_to_be32(0xfffffff7);

        kunmap_atomic(page);
        put_page(hpage);
}

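/*
 * Is this guest physical address visible to userspace, i.e. backed by
 * a memslot or matching the magic page?
 */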
static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        gpa &= ~0xFFFULL;
        if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
                return true;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

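/*
 * Handle a guest page fault: translate the effective address through
 * the guest MMU (when relocation is on), reflect the fault back into
 * the guest if translation fails, map the page on the host, or fall
 * back to MMIO emulation.
 */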
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        bool iswrite = false;
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte = { 0 };
        bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
        bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;
        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                iswrite = true;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
                pte.wimg = HPTE_R_M;
        }

        switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
                if (!data &&
                    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
                    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
                        pte.raddr &= ~SPLIT_HACK_MASK;
                /* fall through */
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
                u64 ssrr1 = vcpu->arch.shadow_srr1;
                u64 msr = kvmppc_get_msr(vcpu);
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
                kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                u32 dsisr = vcpu->arch.fault_dsisr;
                u64 ssrr1 = vcpu->arch.shadow_srr1;
                u64 msr = kvmppc_get_msr(vcpu);
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
                kvmppc_set_dsisr(vcpu, dsisr);
                kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                        /*
                         * There is already a host HPTE there, presumably
                         * a read-only one for a page the guest thinks
                         * is writable, so get rid of it first.
                         */
                        kvmppc_mmu_unmap_page(vcpu, &pte);
                }
                /* The guest's PTE is not mapped yet. Map on the host */
                if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
                        /* Exit KVM if mapping failed */
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                         (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if (r == RESUME_HOST_NV)
                        r = RESUME_HOST;
        }

        return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fp_state.fpr[].
                 */
                if (t->regs->msr & MSR_FP)
                        giveup_fpu(current);
                t->fp_save_area = NULL;
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                t->vr_save_area = NULL;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
                /* Facility not available to the guest, ignore giveup request */
                return;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                vcpu->arch.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, current->thread.tar);
                vcpu->arch.shadow_fscr &= ~FSCR_TAR;
                break;
        }
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(kvmppc_get_msr(vcpu) & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                disable_kernel_fp();
                t->fp_save_area = &vcpu->arch.fp;
                preempt_enable();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                disable_kernel_altivec();
                t->vr_save_area = &vcpu->arch.vr;
                preempt_enable();
#endif
        }

        t->regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                disable_kernel_fp();
                preempt_enable();
        }
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC) {
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                disable_kernel_altivec();
                preempt_enable();
        }
#endif
        current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
        /* Inject the Interrupt Cause field and trigger a guest interrupt */
        vcpu->arch.fscr &= ~(0xffULL << 56);
        vcpu->arch.fscr |= (fac << 56);
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        enum emulation_result er = EMULATE_FAIL;

        if (!(kvmppc_get_msr(vcpu) & MSR_PR))
                er = kvmppc_emulate_instruction(vcpu->run, vcpu);

        if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
                /* Couldn't emulate, trigger interrupt in guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
        }
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        bool guest_fac_enabled;
        BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

        /*
         * Not every facility is enabled by FSCR bits, check whether the
         * guest has this facility enabled at all.
         */
        switch (fac) {
        case FSCR_TAR_LG:
        case FSCR_EBB_LG:
                guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
                break;
        case FSCR_TM_LG:
                guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
                break;
        default:
                guest_fac_enabled = false;
                break;
        }

        if (!guest_fac_enabled) {
                /* Facility not enabled by the guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
                return RESUME_GUEST;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                /* TAR switching isn't lazy in Linux yet */
                current->thread.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, vcpu->arch.tar);
                vcpu->arch.shadow_fscr |= FSCR_TAR;
                break;
        default:
                kvmppc_emulate_fac(vcpu, fac);
                break;
        }

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * Since we disabled MSR_TM in privileged state, an mfspr of a TM
         * SPR can trigger a TM facility-unavailable interrupt. That case
         * is handled by kvmppc_emulate_fac(), which ends up invoking
         * kvmppc_emulate_mfspr(). The mfspr can name any GPR as RT, so
         * the non-volatile registers must be restored to make the update
         * visible.
         */
        if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
                return RESUME_GUEST_NV;
#endif

        return RESUME_GUEST;
}

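/* Update the guest FSCR; if the guest dropped TAR, drop the shadow copy too. */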
void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
        if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
                /* TAR got dropped, drop it in shadow too */
                kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        }
        vcpu->arch.fscr = fscr;
}
#endif

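/* Mirror KVM_GUESTDBG_SINGLESTEP into the guest MSR's single-step (SE) bit. */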
static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                u64 msr = kvmppc_get_msr(vcpu);

                kvmppc_set_msr(vcpu, msr | MSR_SE);
        }
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                u64 msr = kvmppc_get_msr(vcpu);

                kvmppc_set_msr(vcpu, msr & ~MSR_SE);
        }
}

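/*
 * Handle a program interrupt (or emulation assist): either reflect it
 * into the guest or emulate the offending instruction on its behalf.
 */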
static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned int exit_nr)
{
        enum emulation_result er;
        ulong flags;
        u32 last_inst;
        int emul, r;

        /*
         * shadow_srr1 only contains valid flags if we came here via a program
         * exception. The other exceptions (emulation assist, FP unavailable,
         * etc.) do not provide flags in SRR1, so use an illegal-instruction
         * exception when injecting a program interrupt into the guest.
         */
        if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
        else
                flags = SRR1_PROGILL;

        emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
        if (emul != EMULATE_DONE)
                return RESUME_GUEST;

        if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
                pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
                        kvmppc_get_pc(vcpu), last_inst);
#endif
                if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
                        kvmppc_core_queue_program(vcpu, flags);
                        return RESUME_GUEST;
                }
        }

        vcpu->stat.emulated_inst_exits++;
        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_AGAIN:
                r = RESUME_GUEST;
                break;
        case EMULATE_FAIL:
                pr_crit("%s: emulation at %lx failed (%08x)\n",
                        __func__, kvmppc_get_pc(vcpu), last_inst);
                kvmppc_core_queue_program(vcpu, flags);
                r = RESUME_GUEST;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                r = RESUME_HOST_NV;
                break;
        case EMULATE_EXIT_USER:
                r = RESUME_HOST_NV;
                break;
        default:
                BUG();
        }

        return r;
}

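/*
 * Main exit handler for PR KVM: dispatch on the interrupt vector that
 * brought us out of the guest and decide whether to resume the guest
 * or return to userspace.
 */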
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

                if (kvmppc_is_split_real(vcpu))
                        kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
                         *     so we can't use the NX bit inside the guest. Let's cross our fingers,
                         *     that no guest that needs the dcbz hack does NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        u64 msr = kvmppc_get_msr(vcpu);
                        msr |= shadow_srr1 & 0x58000000;
                        kvmppc_set_msr_fast(vcpu, msr);
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /*
                 * We need to handle missing shadow PTEs, and
                 * protection faults due to us mapping a page read-only
                 * when the guest thinks it is writable.
                 */
                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
                        kvmppc_set_dar(vcpu, dar);
                        kvmppc_set_dsisr(vcpu, fault_dsisr);
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
        case BOOK3S_INTERRUPT_DOORBELL:
        case BOOK3S_INTERRUPT_H_DOORBELL:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
                r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
                break;
        case BOOK3S_INTERRUPT_SYSCALL:
        {
                u32 last_sc;
                int emul;

                /* Get last sc for papr */
                if (vcpu->arch.papr_enabled) {
                        /* The sc instruction points SRR0 to the next inst */
                        emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
                        if (emul != EMULATE_DONE) {
                                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                if (vcpu->arch.papr_enabled &&
                    (last_sc == 0x44000022) &&
                    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_PPC_BOOK3S_64
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
		u32 last_inst;

		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
			/* Do paired single instruction emulation */
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
				r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
			else
				r = RESUME_GUEST;

			break;
		}

		/* Enable external provider */
		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL:
			ext_msr = MSR_FP;
			break;

		case BOOK3S_INTERRUPT_ALTIVEC:
			ext_msr = MSR_VEC;
			break;

		case BOOK3S_INTERRUPT_VSX:
			ext_msr = MSR_VSX;
			break;
		}

		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
		break;
	}
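	/*
	 * Alignment interrupts are reflected to the guest: DSISR/DAR are
	 * reconstructed from the faulting instruction so the guest's own
	 * alignment handler sees what real hardware would have given it.
	 */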
	case BOOK3S_INTERRUPT_ALIGNMENT:
	{
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_TRACE:
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			run->exit_reason = KVM_EXIT_DEBUG;
			r = RESUME_HOST;
		} else {
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/*
		 * To avoid clobbering exit_reason, only check for signals
		 * if we aren't already exiting to userspace for some other
		 * reason.
		 */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the
		 * guest; if we really did time things that badly, we just
		 * exit again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

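/*
 * Copy the guest-visible MMU state out to userspace: SDR1 plus either
 * the SLB (BOOK3S_HFLAG_SLB, i.e. 64-bit guests) or the segment
 * registers and BATs (32-bit guests).
 */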
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

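/*
 * Load guest MMU state from userspace.  The shadow SLB is cleared
 * before the new entries are installed, and the shadow MMU is flushed
 * afterwards so no stale translations survive the segment change.
 */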
static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
#ifdef CONFIG_PPC_BOOK3S_64
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		/* Flush all SLB entries */
		vcpu->arch.mmu.slbmte(vcpu, 0, 0);
		vcpu->arch.mmu.slbia(vcpu);

		for (i = 0; i < 64; i++) {
			u64 rb = sregs->u.s.ppc64.slb[i].slbe;
			u64 rs = sregs->u.s.ppc64.slb[i].slbv;

			if (rb & SLB_ESID_V)
				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
		}
	} else
#endif
	{
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

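/*
 * ONE_REG accessors for the handful of registers PR KVM keeps outside
 * the regular sregs.  For KVM_REG_PPC_LPCR only the ILE bit is
 * modelled, since that is the part of the LPCR that controls the
 * endianness interrupts are delivered with.
 */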
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, to_book3s(vcpu)->vtb);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_VTB:
		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

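/*
 * Allocate and set up a PR vcpu: the vcpu itself, its book3s shadow
 * state, and the page shared with the guest; then pick a default PVR
 * and let kvmppc_set_pvr_pr() derive the emulated CPU features.
 */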
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on a sufficiently
	 * recent machine that has 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

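/*
 * Outer dispatch loop for a PR vcpu.  Host FP/Altivec/VSX state is
 * flushed before entry, the guest FPU is preloaded if the guest MSR
 * says it is live, and any facility state the guest used is reclaimed
 * on the way out so the host sees consistent registers.
 */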
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_setup_debug(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest; if we
	 * really did time things that badly, we just exit again due to a
	 * host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU, Altivec and VSX state */
	giveup_all(current);

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvmppc_clear_debug(vcpu);

	/*
	 * No need for guest_exit.  It's done in handle_exit.
	 * We also get here with interrupts enabled.
	 */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

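/*
 * PR KVM builds its shadow translations on demand from the guest HPT,
 * so there appears to be nothing memslot-specific to prepare, commit
 * or tear down; the callbacks below are stubs that only satisfy the
 * kvmppc_ops interface.
 */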
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test.  Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}

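/*
 * KVM_PPC_CONFIGURE_V3_MMU handler: PR KVM only runs guests in HPT
 * mode, so on POWER9 the only configuration accepted is "HPT, no
 * process table".
 */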
static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -ENODEV;
	/* Require flags and process table base and size to all be zero. */
	if (cfg->flags || cfg->process_table)
		return -EINVAL;
	return 0;
}

#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

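/*
 * The first PR VM on the host disables relocation-on exceptions
 * (presumably because the PR interrupt entry code expects exceptions
 * to be taken with relocation off), and the last VM to be destroyed
 * re-enables them; kvm_global_user_count tracks the VM count.
 */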
static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pseries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pseries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/*
	 * PR KVM can work on POWER9 inside a guest partition
	 * running in HPT mode.  It can't work if we are using
	 * radix translation (because radix provides no way for
	 * a process to have unique translations in quadrant 3)
	 * or in a bare-metal HPT-mode host (because POWER9
	 * uses a modified HPTE format which the PR KVM code
	 * has not been adapted to use).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
	    (radix_enabled() || cpu_has_feature(CPU_FTR_HVMODE)))
		return -EIO;
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

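/*
 * The kvmppc_ops instance registered for PR mode; the generic book3s
 * code dispatches through this table once kvmppc_pr_ops is set in
 * kvmppc_book3s_init_pr() below.
 */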
static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load = kvmppc_core_vcpu_load_pr,
	.vcpu_put = kvmppc_core_vcpu_put_pr,
	.set_msr = kvmppc_set_msr_pr,
	.vcpu_run = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
	.hcall_implemented = kvmppc_hcall_impl_pr,
	.configure_mmu = kvm_configure_mmu_pr,
#endif
	.giveup_ext = kvmppc_giveup_ext,
};

int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif