/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M   _PAGE_COHERENT
#endif

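/*
 * "Split real mode" is the state where the guest runs with MSR_DR set but
 * MSR_IR clear: instruction fetches are in real mode while data accesses
 * are translated. PR KVM cannot run the guest like that directly, so the
 * helpers below relocate the guest PC into a fixup region at
 * SPLIT_HACK_OFFS and record the trick in BOOK3S_HFLAG_SPLIT_HACK so it
 * can be undone later.
 */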
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* We are in DR only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* We have not fixed up the guest already */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* The code is in fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);

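/*
 * Preempt-notifier hooks: "load" runs just before the vcpu starts running
 * on a host CPU, "put" when it is scheduled away. Note the LPCR[AIL]
 * dance on HV-capable hosts: relocated interrupt delivery is switched off
 * while a PR guest is loaded (a hedged reading: PR KVM's entry/exit code
 * appears to expect interrupts at the classic real-mode vectors) and is
 * restored to AIL_3 on put.
 */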
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu->in_use = 0;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
	svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
	svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
	svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
	svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
	svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
	svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
	svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
	svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
	svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
	svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
	svcpu->cr  = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.regs.xer;
	svcpu->ctr = vcpu->arch.regs.ctr;
	svcpu->lr  = vcpu->arch.regs.link;
	svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;

	svcpu_put(svcpu);
}

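/*
 * Derive the MSR the hardware will actually run with while the guest
 * executes. As a rough sketch (the precise masks are in the code below,
 * and GUEST_CONTROLLED_BITS is illustrative only, not a real macro):
 *
 *	shadow_msr = (guest_msr & GUEST_CONTROLLED_BITS)
 *		   | MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
 *
 * The guest keeps its FP-exception, single-step and endianness choices,
 * but translation, problem state and external interrupts are always
 * forced on, which is what confines the guest to user mode.
 */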
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
		MSR_TM | MSR_TS_MASK;
#else
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ulong old_msr;
#endif

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
	vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
	vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
	vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
	vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
	vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
	vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
	vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
	vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
	vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
	vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr  = svcpu->cr;
	vcpu->arch.regs.xer = svcpu->xer;
	vcpu->arch.regs.ctr = svcpu->ctr;
	vcpu->arch.regs.link  = svcpu->lr;
	vcpu->arch.regs.nip = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Unlike other MSR bits, the MSR[TS] bits can be changed by the
	 * guest without the host being notified: unprivileged instructions
	 * like "tbegin"/"tend"/"tresume"/"tsuspend" modify them directly
	 * in a PR KVM guest.
	 *
	 * It is necessary to sync them here in order to calculate a
	 * correct shadow_msr.
	 *
	 * A privileged guest's tbegin fails at present, so we only need
	 * to take care of problem-state guests.
	 */
	old_msr = kvmppc_get_msr(vcpu);
	if (unlikely((old_msr & MSR_PR) &&
		     (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
		     (old_msr & (MSR_TS_MASK)))) {
		old_msr &= ~(MSR_TS_MASK);
		old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
		kvmppc_set_msr_fast(vcpu, old_msr);
		kvmppc_recalc_shadow_msr(vcpu);
	}
#endif

	svcpu->in_use = false;

out:
	svcpu_put(svcpu);
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
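/*
 * Flush shadow translations for every guest page backed by host virtual
 * addresses in [start, end): the range is converted into a gfn range per
 * memslot and the corresponding shadow PTEs are flushed on each vcpu.
 */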
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

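/*
 * Called whenever the guest's MSR is written. Besides recomputing the
 * shadow MSR, this is where MSR_POW puts the vcpu to sleep, where the
 * split real mode fixup is applied or undone, and where segment mappings
 * are flushed when the translation-relevant bits (PR/IR/DR) change.
 */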
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = kvmppc_get_msr(vcpu);

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around that we need to flush. Typically, the 32-bit
	 * magic page is instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * ie, we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as 32 bytes store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have 32-byte cache lines, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

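/*
 * A guest physical address counts as RAM if it matches the magic
 * (paravirtualized) page or falls within a visible memslot; the page
 * fault path below treats everything else as MMIO.
 */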
static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

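/*
 * Common fault path for instruction and data storage interrupts. The
 * faulting effective address is resolved through the guest MMU (or given
 * a 1:1 identity mapping when translation is off), and the fault is then
 * either reflected into the guest, satisfied by mapping the page on the
 * host, or handed to MMIO emulation.
 */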
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
		pte.wimg = HPTE_R_M;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		/* fall through */
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		u32 dsisr = vcpu->arch.fault_dsisr;
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
		kvmppc_set_dsisr(vcpu, dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request */
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

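/*
 * FPU/Altivec/VSX are switched lazily: the guest is only given a unit
 * once it faults on it (kvmppc_handle_ext() below), and the unit is taken
 * back in kvmppc_giveup_ext() above, e.g. on vcpu put.
 * vcpu->arch.guest_owned_ext tracks which units the guest currently owns.
 */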
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

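/*
 * Facility (TAR/EBB/DSCR) handling. The FSCR gates these facilities for
 * the guest; its top byte is the Interrupt Cause field, which is why the
 * facility log value (e.g. FSCR_TAR_LG) is shifted up by 56 bits when a
 * facility unavailable interrupt is injected.
 */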
static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu->run, vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

	return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	}
	vcpu->arch.fscr = fscr;
}
#endif

static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr | MSR_SE);
	}
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
	}
}

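/*
 * Program interrupt and emulation assist exit path: fetch the failing
 * guest instruction, then either reflect the program check into the guest
 * or emulate the instruction on the guest's behalf.
 */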
static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int exit_nr)
{
	enum emulation_result er;
	ulong flags;
	u32 last_inst;
	int emul, r;

	/*
	 * shadow_srr1 only contains valid flags if we came here via a program
	 * exception. The other exceptions (emulation assist, FP unavailable,
	 * etc.) do not provide flags in SRR1, so use an illegal-instruction
	 * exception when injecting a program interrupt into the guest.
	 */
	if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
	else
		flags = SRR1_PROGILL;

	emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
	if (emul != EMULATE_DONE)
		return RESUME_GUEST;

	if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
			kvmppc_get_pc(vcpu), last_inst);
#endif
		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
			kvmppc_core_queue_program(vcpu, flags);
			return RESUME_GUEST;
		}
	}

	vcpu->stat.emulated_inst_exits++;
	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		pr_crit("%s: emulation at %lx failed (%08x)\n",
			__func__, kvmppc_get_pc(vcpu), last_inst);
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_EXIT_USER:
		r = RESUME_HOST_NV;
		break;
	default:
		BUG();
	}

	return r;
}

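/*
 * Top-level exit dispatcher, invoked for every guest exit. The return
 * value is a RESUME_* code: RESUME_GUEST re-enters the guest, while
 * RESUME_HOST goes back to userspace with run->exit_reason filled in.
 */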
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

		if (kvmppc_is_split_real(vcpu))
			kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			u64 msr = kvmppc_get_msr(vcpu);
			msr |= shadow_srr1 & 0x58000000;
			kvmppc_set_msr_fast(vcpu, msr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_set_dar(vcpu, dar);
			kvmppc_set_dsisr(vcpu, fault_dsisr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
		break;
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		u32 last_sc;
		int emul;

		/* Get last sc for papr */
		if (vcpu->arch.papr_enabled) {
			/* The sc instruction points SRR0 to the next inst */
			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
			if (emul != EMULATE_DONE) {
				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
				r = RESUME_GUEST;
				break;
			}
		}

		if (vcpu->arch.papr_enabled &&
		    (last_sc == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
		u32 last_inst;

		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
			/* Do paired single instruction emulation */
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
				r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
			else
				r = RESUME_GUEST;

			break;
		}

		/* Enable external provider */
		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL:
			ext_msr = MSR_FP;
			break;

		case BOOK3S_INTERRUPT_ALTIVEC:
			ext_msr = MSR_VEC;
			break;

		case BOOK3S_INTERRUPT_VSX:
			ext_msr = MSR_VSX;
			break;
		}

		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
	{
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		r = RESUME_GUEST;
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_TRACE:
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			run->exit_reason = KVM_EXIT_DEBUG;
			r = RESUME_HOST;
		} else {
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

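/*
 * The sregs ioctls expose either the 64-bit SLB or the 32-bit segment
 * registers and BATs, depending on whether this virtual CPU model has an
 * SLB (BOOK3S_HFLAG_SLB, set according to the PVR).
 */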
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301316static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
1317 struct kvm_sregs *sregs)
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001318{
1319 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1320 int i;
1321
1322 sregs->pvr = vcpu->arch.pvr;
1323
1324 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
1325 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1326 for (i = 0; i < 64; i++) {
1327 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
1328 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1329 }
1330 } else {
1331 for (i = 0; i < 16; i++)
Alexander Graf5deb8e72014-04-24 13:46:24 +02001332 sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001333
1334 for (i = 0; i < 8; i++) {
1335 sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
1336 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
1337 }
1338 }
1339
1340 return 0;
1341}
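
/*
 * Usage sketch (illustrative, from userspace): the sregs pair here backs
 * the KVM_GET_SREGS/KVM_SET_SREGS vcpu ioctls, so a VMM can snapshot and
 * restore SDR1 plus the SLB (64-bit) or segment registers and BATs
 * (32-bit) in one call. migrate_segments() is a hypothetical helper;
 * it assumes two already-created vcpu fds and trims error handling:
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	static int migrate_segments(int src_vcpu_fd, int dst_vcpu_fd)
 *	{
 *		struct kvm_sregs sregs;
 *
 *		if (ioctl(src_vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
 *			return -1;
 *		return ioctl(dst_vcpu_fd, KVM_SET_SREGS, &sregs);
 *	}
 */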

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
#ifdef CONFIG_PPC_BOOK3S_64
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		/* Flush all SLB entries */
		vcpu->arch.mmu.slbmte(vcpu, 0, 0);
		vcpu->arch.mmu.slbia(vcpu);

		for (i = 0; i < 64; i++) {
			u64 rb = sregs->u.s.ppc64.slb[i].slbe;
			u64 rs = sregs->u.s.ppc64.slb[i].slbv;

			if (rb & SLB_ESID_V)
				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
		}
	} else
#endif
	{
		for (i = 0; i < 16; i++)
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);

		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, to_book3s(vcpu)->vtb);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		/* We are only interested in the LPCR_ILE bit */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}
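
/*
 * Note that PR KVM models only the LPCR_ILE bit of the LPCR: it selects
 * the MSR_LE value that vcpu->arch.intr_msr hands to the guest on
 * interrupt delivery, so a little-endian guest keeps taking interrupts
 * in the right byte order. All other LPCR fields are ignored here.
 */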

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_VTB:
		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
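
/*
 * Usage sketch (illustrative, from userspace): the two handlers above sit
 * behind the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls, which
 * take a register id plus a pointer to the value. set_hior() below is a
 * hypothetical helper; error handling trimmed:
 *
 *	#include <linux/kvm.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *
 *	static int set_hior(int vcpu_fd, uint64_t hior)
 *	{
 *		struct kvm_one_reg reg = {
 *			.id   = KVM_REG_PPC_HIOR,
 *			.addr = (uintptr_t)&hior,
 *		};
 *
 *		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *	}
 */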

static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on a sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto free_shared_page;

	return vcpu;

free_shared_page:
	free_page((unsigned long)vcpu->arch.shared);
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}
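
/*
 * The unwind labels above mirror the allocation order exactly: the MMU
 * init failure path frees the shared page before uninitializing the vcpu
 * (previously the page leaked on this path), the vcpu-init failure path
 * frees the 32-bit shadow_vcpu when it exists, and so on back to the
 * vcpu cache object itself.
 */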

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_setup_debug(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU, Altivec and VSX state */
	giveup_all(current);

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvmppc_clear_debug(vcpu);

	/*
	 * No need for guest_exit. It's done in handle_exit.
	 * We also get here with interrupts enabled.
	 */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}
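
/*
 * Usage sketch (illustrative, from userspace): a VMM drives this entry
 * point through the KVM_RUN ioctl in a loop, reacting to the exit_reason
 * left in the mmap'ed kvm_run area - for instance KVM_EXIT_DEBUG from the
 * single-step path above. run_vcpu() is a hypothetical helper; error
 * handling trimmed:
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	static int run_vcpu(int vcpu_fd, struct kvm_run *run)
 *	{
 *		for (;;) {
 *			if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *				return -1;
 *			if (run->exit_reason == KVM_EXIT_DEBUG)
 *				break;	// hand control to the debugger
 *		}
 *		return 0;
 *	}
 */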

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
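
/*
 * Usage sketch (illustrative, from userspace): live migration code polls
 * this through the KVM_GET_DIRTY_LOG vm ioctl, passing a bitmap with one
 * bit per page in the slot. fetch_dirty_bitmap() is a hypothetical helper
 * that assumes the caller knows the slot geometry; error handling trimmed:
 *
 *	#include <linux/kvm.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *
 *	static void *fetch_dirty_bitmap(int vm_fd, int slot, size_t npages)
 *	{
 *		struct kvm_dirty_log dlog = { .slot = slot };
 *
 *		// one bit per page, rounded up to 64-bit words
 *		dlog.dirty_bitmap = calloc((npages + 63) / 64, 8);
 *		if (dlog.dirty_bitmap &&
 *		    ioctl(vm_fd, KVM_GET_DIRTY_LOG, &dlog) < 0) {
 *			free(dlog.dirty_bitmap);
 *			return NULL;
 *		}
 *		return dlog.dirty_bitmap;
 *	}
 */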

static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't easily have a vcpu
	 * to hand here to test with. Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
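
/*
 * Usage sketch (illustrative, from userspace): a VMM queries the table
 * built above with the KVM_PPC_GET_SMMU_INFO vm ioctl to learn which
 * segment and page-size combinations it may advertise to the guest.
 * guest_supports_64k_pages() is a hypothetical helper:
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	static int guest_supports_64k_pages(int vm_fd)
 *	{
 *		struct kvm_ppc_smmu_info info;
 *		int i;
 *
 *		if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0)
 *			return 0;
 *		for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++)
 *			if (info.sps[i].page_shift == 16)
 *				return 1;
 *		return 0;
 *	}
 */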

static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -ENODEV;
	/* Require flags and process table base and size to all be zero. */
	if (cfg->flags || cfg->process_table)
		return -EINVAL;
	return 0;
}

#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pseries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pseries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}
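
/*
 * The init/destroy pair above keeps a plain refcount rather than a per-VM
 * flag: relocation-on exceptions are a partition-wide setting, so they
 * stay disabled from the moment the first PR guest appears until the
 * last one is torn down.
 */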

static int kvmppc_core_check_processor_compat_pr(void)
{
	/*
	 * PR KVM can work on POWER9 inside a guest partition
	 * running in HPT mode. It can't work if we are using
	 * radix translation (because radix provides no way for
	 * a process to have unique translations in quadrant 3)
	 * or in a bare-metal HPT-mode host (because POWER9
	 * uses a modified HPTE format which the PR KVM code
	 * has not been adapted to use).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
	    (radix_enabled() || cpu_has_feature(CPU_FTR_HVMODE)))
		return -EIO;
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load = kvmppc_core_vcpu_load_pr,
	.vcpu_put = kvmppc_core_vcpu_put_pr,
	.set_msr = kvmppc_set_msr_pr,
	.vcpu_run = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
	.hcall_implemented = kvmppc_hcall_impl_pr,
	.configure_mmu = kvm_configure_mmu_pr,
#endif
	.giveup_ext = kvmppc_giveup_ext,
};

int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif