// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2018
 * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
 *         Paul Mackerras <paulus@ozlabs.org>
 *
 * Description: KVM functions specific to running nested KVM-HV guests
 * on Book3S processors (specifically POWER9 and later).
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>

static struct patb_entry *pseries_partition_tb;

static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);

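/*
 * Copy the hypervisor-privileged register state of @vcpu into @hr,
 * in the layout used by the H_ENTER_NESTED hcall.
 */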
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
        struct kvmppc_vcore *vc = vcpu->arch.vcore;

        hr->pcr = vc->pcr | PCR_MASK;
        hr->dpdes = vc->dpdes;
        hr->hfscr = vcpu->arch.hfscr;
        hr->tb_offset = vc->tb_offset;
        hr->dawr0 = vcpu->arch.dawr;
        hr->dawrx0 = vcpu->arch.dawrx;
        hr->ciabr = vcpu->arch.ciabr;
        hr->purr = vcpu->arch.purr;
        hr->spurr = vcpu->arch.spurr;
        hr->ic = vcpu->arch.ic;
        hr->vtb = vc->vtb;
        hr->srr0 = vcpu->arch.shregs.srr0;
        hr->srr1 = vcpu->arch.shregs.srr1;
        hr->sprg[0] = vcpu->arch.shregs.sprg0;
        hr->sprg[1] = vcpu->arch.shregs.sprg1;
        hr->sprg[2] = vcpu->arch.shregs.sprg2;
        hr->sprg[3] = vcpu->arch.shregs.sprg3;
        hr->pidr = vcpu->arch.pid;
        hr->cfar = vcpu->arch.cfar;
        hr->ppr = vcpu->arch.ppr;
}

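/*
 * hv_guest_state and pt_regs are exchanged with L1 in the guest's
 * endianness; byteswap them when L1's endianness differs from ours.
 */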
static void byteswap_pt_regs(struct pt_regs *regs)
{
        unsigned long *addr = (unsigned long *) regs;

        for (; addr < ((unsigned long *) (regs + 1)); addr++)
                *addr = swab64(*addr);
}

static void byteswap_hv_regs(struct hv_guest_state *hr)
{
        hr->version = swab64(hr->version);
        hr->lpid = swab32(hr->lpid);
        hr->vcpu_token = swab32(hr->vcpu_token);
        hr->lpcr = swab64(hr->lpcr);
        hr->pcr = swab64(hr->pcr) | PCR_MASK;
        hr->amor = swab64(hr->amor);
        hr->dpdes = swab64(hr->dpdes);
        hr->hfscr = swab64(hr->hfscr);
        hr->tb_offset = swab64(hr->tb_offset);
        hr->dawr0 = swab64(hr->dawr0);
        hr->dawrx0 = swab64(hr->dawrx0);
        hr->ciabr = swab64(hr->ciabr);
        hr->hdec_expiry = swab64(hr->hdec_expiry);
        hr->purr = swab64(hr->purr);
        hr->spurr = swab64(hr->spurr);
        hr->ic = swab64(hr->ic);
        hr->vtb = swab64(hr->vtb);
        hr->hdar = swab64(hr->hdar);
        hr->hdsisr = swab64(hr->hdsisr);
        hr->heir = swab64(hr->heir);
        hr->asdr = swab64(hr->asdr);
        hr->srr0 = swab64(hr->srr0);
        hr->srr1 = swab64(hr->srr1);
        hr->sprg[0] = swab64(hr->sprg[0]);
        hr->sprg[1] = swab64(hr->sprg[1]);
        hr->sprg[2] = swab64(hr->sprg[2]);
        hr->sprg[3] = swab64(hr->sprg[3]);
        hr->pidr = swab64(hr->pidr);
        hr->cfar = swab64(hr->cfar);
        hr->ppr = swab64(hr->ppr);
}

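/*
 * Save the exit state of an L2 guest, including the fault information
 * for the interrupt in @trap, into @hr for return to L1.
 */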
static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
                                 struct hv_guest_state *hr)
{
        struct kvmppc_vcore *vc = vcpu->arch.vcore;

        hr->dpdes = vc->dpdes;
        hr->hfscr = vcpu->arch.hfscr;
        hr->purr = vcpu->arch.purr;
        hr->spurr = vcpu->arch.spurr;
        hr->ic = vcpu->arch.ic;
        hr->vtb = vc->vtb;
        hr->srr0 = vcpu->arch.shregs.srr0;
        hr->srr1 = vcpu->arch.shregs.srr1;
        hr->sprg[0] = vcpu->arch.shregs.sprg0;
        hr->sprg[1] = vcpu->arch.shregs.sprg1;
        hr->sprg[2] = vcpu->arch.shregs.sprg2;
        hr->sprg[3] = vcpu->arch.shregs.sprg3;
        hr->pidr = vcpu->arch.pid;
        hr->cfar = vcpu->arch.cfar;
        hr->ppr = vcpu->arch.ppr;
        switch (trap) {
        case BOOK3S_INTERRUPT_H_DATA_STORAGE:
                hr->hdar = vcpu->arch.fault_dar;
                hr->hdsisr = vcpu->arch.fault_dsisr;
                hr->asdr = vcpu->arch.fault_gpa;
                break;
        case BOOK3S_INTERRUPT_H_INST_STORAGE:
                hr->asdr = vcpu->arch.fault_gpa;
                break;
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
                hr->heir = vcpu->arch.emul_inst;
                break;
        }
}

static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
        /*
         * Don't let L1 enable features for L2 which we've disabled for L1,
         * but preserve the interrupt cause field.
         */
        hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);

        /* Don't let data address watchpoint match in hypervisor state */
        hr->dawrx0 &= ~DAWRX_HYP;

        /* Don't let completed instruction address breakpt match in HV state */
        if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
                hr->ciabr &= ~CIABR_PRIV;
}

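/* Load the hypervisor-privileged register state in @hr into @vcpu */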
static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
        struct kvmppc_vcore *vc = vcpu->arch.vcore;

        vc->pcr = hr->pcr | PCR_MASK;
        vc->dpdes = hr->dpdes;
        vcpu->arch.hfscr = hr->hfscr;
        vcpu->arch.dawr = hr->dawr0;
        vcpu->arch.dawrx = hr->dawrx0;
        vcpu->arch.ciabr = hr->ciabr;
        vcpu->arch.purr = hr->purr;
        vcpu->arch.spurr = hr->spurr;
        vcpu->arch.ic = hr->ic;
        vc->vtb = hr->vtb;
        vcpu->arch.shregs.srr0 = hr->srr0;
        vcpu->arch.shregs.srr1 = hr->srr1;
        vcpu->arch.shregs.sprg0 = hr->sprg[0];
        vcpu->arch.shregs.sprg1 = hr->sprg[1];
        vcpu->arch.shregs.sprg2 = hr->sprg[2];
        vcpu->arch.shregs.sprg3 = hr->sprg[3];
        vcpu->arch.pid = hr->pidr;
        vcpu->arch.cfar = hr->cfar;
        vcpu->arch.ppr = hr->ppr;
}

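/*
 * Load the L2 exit state in @hr back into @vcpu; the counterpart of
 * save_hv_return_state() for the L1 side of H_ENTER_NESTED.
 */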
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
                                   struct hv_guest_state *hr)
{
        struct kvmppc_vcore *vc = vcpu->arch.vcore;

        vc->dpdes = hr->dpdes;
        vcpu->arch.hfscr = hr->hfscr;
        vcpu->arch.purr = hr->purr;
        vcpu->arch.spurr = hr->spurr;
        vcpu->arch.ic = hr->ic;
        vc->vtb = hr->vtb;
        vcpu->arch.fault_dar = hr->hdar;
        vcpu->arch.fault_dsisr = hr->hdsisr;
        vcpu->arch.fault_gpa = hr->asdr;
        vcpu->arch.emul_inst = hr->heir;
        vcpu->arch.shregs.srr0 = hr->srr0;
        vcpu->arch.shregs.srr1 = hr->srr1;
        vcpu->arch.shregs.sprg0 = hr->sprg[0];
        vcpu->arch.shregs.sprg1 = hr->sprg[1];
        vcpu->arch.shregs.sprg2 = hr->sprg[2];
        vcpu->arch.shregs.sprg3 = hr->sprg[3];
        vcpu->arch.pid = hr->pidr;
        vcpu->arch.cfar = hr->cfar;
        vcpu->arch.ppr = hr->ppr;
}

static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
{
        /* No need to reflect the page fault to L1, we've handled it */
        vcpu->arch.trap = 0;

        /*
         * Since the L2 gprs have already been written back into L1 memory when
         * we complete the mmio, store the L1 memory location of the L2 gpr
         * being loaded into by the mmio so that the loaded value can be
         * written there in kvmppc_complete_mmio_load()
         */
        if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
            && (vcpu->mmio_is_write == 0)) {
                vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
                                           offsetof(struct pt_regs,
                                                    gpr[vcpu->arch.io_gpr]);
                vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
        }
}

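/*
 * Handle the H_ENTER_NESTED hcall:
 * r4 = L1 guest real address of the hv_guest_state to load
 * r5 = L1 guest real address of the pt_regs to load
 * Runs the L2 vcpu until it exits, then returns the interrupt vector
 * that caused the exit (or an H_xxx error code).
 */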
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
        long int err, r;
        struct kvm_nested_guest *l2;
        struct pt_regs l2_regs, saved_l1_regs;
        struct hv_guest_state l2_hv, saved_l1_hv;
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
        u64 hv_ptr, regs_ptr;
        u64 hdec_exp;
        s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
        u64 mask;
        unsigned long lpcr;

        if (vcpu->kvm->arch.l1_ptcr == 0)
                return H_NOT_AVAILABLE;

        /* copy parameters in */
        hv_ptr = kvmppc_get_gpr(vcpu, 4);
        err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
                                  sizeof(struct hv_guest_state));
        if (err)
                return H_PARAMETER;
        if (kvmppc_need_byteswap(vcpu))
                byteswap_hv_regs(&l2_hv);
        if (l2_hv.version != HV_GUEST_STATE_VERSION)
                return H_P2;

        regs_ptr = kvmppc_get_gpr(vcpu, 5);
        err = kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
                                  sizeof(struct pt_regs));
        if (err)
                return H_PARAMETER;
        if (kvmppc_need_byteswap(vcpu))
                byteswap_pt_regs(&l2_regs);
        if (l2_hv.vcpu_token >= NR_CPUS)
                return H_PARAMETER;

        /* translate lpid */
        l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
        if (!l2)
                return H_PARAMETER;
        if (!l2->l1_gr_to_hr) {
                mutex_lock(&l2->tlb_lock);
                kvmhv_update_ptbl_cache(l2);
                mutex_unlock(&l2->tlb_lock);
        }

        /* save l1 values of things */
        vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
        saved_l1_regs = vcpu->arch.regs;
        kvmhv_save_hv_regs(vcpu, &saved_l1_hv);

        /* convert TB values/offsets to host (L0) values */
        hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
        vc->tb_offset += l2_hv.tb_offset;

        /* set L1 state to L2 state */
        vcpu->arch.nested = l2;
        vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
        vcpu->arch.regs = l2_regs;
        vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
        mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
                LPCR_LPES | LPCR_MER;
        lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask);
        sanitise_hv_regs(vcpu, &l2_hv);
        restore_hv_regs(vcpu, &l2_hv);

        vcpu->arch.ret = RESUME_GUEST;
        vcpu->arch.trap = 0;
        do {
                if (mftb() >= hdec_exp) {
                        vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
                        r = RESUME_HOST;
                        break;
                }
                r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
                                          lpcr);
        } while (is_kvmppc_resume_guest(r));

        /* save L2 state for return */
        l2_regs = vcpu->arch.regs;
        l2_regs.msr = vcpu->arch.shregs.msr;
        delta_purr = vcpu->arch.purr - l2_hv.purr;
        delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
        delta_ic = vcpu->arch.ic - l2_hv.ic;
        delta_vtb = vc->vtb - l2_hv.vtb;
        save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);

        /* restore L1 state */
        vcpu->arch.nested = NULL;
        vcpu->arch.regs = saved_l1_regs;
        vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
        /* set L1 MSR TS field according to L2 transaction state */
        if (l2_regs.msr & MSR_TS_MASK)
                vcpu->arch.shregs.msr |= MSR_TS_S;
        vc->tb_offset = saved_l1_hv.tb_offset;
        restore_hv_regs(vcpu, &saved_l1_hv);
        vcpu->arch.purr += delta_purr;
        vcpu->arch.spurr += delta_spurr;
        vcpu->arch.ic += delta_ic;
        vc->vtb += delta_vtb;

        kvmhv_put_nested(l2);

        /* copy l2_hv_state and regs back to guest */
        if (kvmppc_need_byteswap(vcpu)) {
                byteswap_hv_regs(&l2_hv);
                byteswap_pt_regs(&l2_regs);
        }
        err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
                                   sizeof(struct hv_guest_state));
        if (err)
                return H_AUTHORITY;
        err = kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
                                   sizeof(struct pt_regs));
        if (err)
                return H_AUTHORITY;

        if (r == -EINTR)
                return H_INTERRUPT;

        if (vcpu->mmio_needed) {
                kvmhv_nested_mmio_needed(vcpu, regs_ptr);
                return H_TOO_HARD;
        }

        return vcpu->arch.trap;
}

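/*
 * One-time setup for running as a nested (L1) hypervisor on pseries:
 * allocate a partition table and register it with the L0 hypervisor.
 */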
long kvmhv_nested_init(void)
{
        long int ptb_order;
        unsigned long ptcr;
        long rc;

        if (!kvmhv_on_pseries())
                return 0;
        if (!radix_enabled())
                return -ENODEV;

        /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
        ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
        if (ptb_order < 8)
                ptb_order = 8;
        pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
                                       GFP_KERNEL);
        if (!pseries_partition_tb) {
                pr_err("kvm-hv: failed to allocate nested partition table\n");
                return -ENOMEM;
        }

        ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
        rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
        if (rc != H_SUCCESS) {
                pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
                       rc);
                kfree(pseries_partition_tb);
                pseries_partition_tb = NULL;
                return -ENODEV;
        }

        return 0;
}

void kvmhv_nested_exit(void)
{
        /*
         * N.B. the kvmhv_on_pseries() test is there because it enables
         * the compiler to remove the call to plpar_hcall_norets()
         * when CONFIG_PPC_PSERIES=n.
         */
        if (kvmhv_on_pseries() && pseries_partition_tb) {
                plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
                kfree(pseries_partition_tb);
                pseries_partition_tb = NULL;
        }
}

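/*
 * Flush all translations cached for @lpid: directly when running bare
 * metal, or via H_TLB_INVALIDATE when we are ourselves running under
 * a hypervisor.
 */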
static void kvmhv_flush_lpid(unsigned int lpid)
{
        long rc;

        if (!kvmhv_on_pseries()) {
                radix__flush_all_lpid(lpid);
                return;
        }

        rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
                                lpid, TLBIEL_INVAL_SET_LPID);
        if (rc)
                pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}

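/*
 * Set the partition table entry for @lpid: directly in the hardware
 * table on bare metal, or in our in-memory copy (followed by an lpid
 * flush) when running under a hypervisor.
 */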
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
{
        if (!kvmhv_on_pseries()) {
                mmu_partition_table_set_entry(lpid, dw0, dw1, true);
                return;
        }

        pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
        pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
        /* L0 will do the necessary barriers */
        kvmhv_flush_lpid(lpid);
}

static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
{
        unsigned long dw0;

        dw0 = PATB_HR | radix__get_tree_size() |
                __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
        kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}

void kvmhv_vm_nested_init(struct kvm *kvm)
{
        kvm->arch.max_nested_lpid = -1;
}

/*
 * Handle the H_SET_PARTITION_TABLE hcall.
 * r4 = guest real address of partition table + log_2(size) - 12
 * (formatted as for the PTCR).
 */
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
        int srcu_idx;
        long ret = H_SUCCESS;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        /*
         * Limit the partition table to 4096 entries (because that's what
         * hardware supports), and check the base address.
         */
        if ((ptcr & PRTS_MASK) > 12 - 8 ||
            !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
                ret = H_PARAMETER;
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        if (ret == H_SUCCESS)
                kvm->arch.l1_ptcr = ptcr;
        return ret;
}

/*
 * Handle the H_COPY_TOFROM_GUEST hcall.
 * r4 = L1 lpid of nested guest
 * r5 = pid
 * r6 = eaddr to access
 * r7 = to buffer (L1 gpa)
 * r8 = from buffer (L1 gpa)
 * r9 = n bytes to copy
 */
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
{
        struct kvm_nested_guest *gp;
        int l1_lpid = kvmppc_get_gpr(vcpu, 4);
        int pid = kvmppc_get_gpr(vcpu, 5);
        gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
        gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
        gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
        void *buf;
        unsigned long n = kvmppc_get_gpr(vcpu, 9);
        bool is_load = !!gp_to;
        long rc;

        if (gp_to && gp_from) /* One must be NULL to determine the direction */
                return H_PARAMETER;

        if (eaddr & (0xFFFUL << 52))
                return H_PARAMETER;

        buf = kzalloc(n, GFP_KERNEL);
        if (!buf)
                return H_NO_MEM;

        gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
        if (!gp) {
                rc = H_PARAMETER;
                goto out_free;
        }

        mutex_lock(&gp->tlb_lock);

        if (is_load) {
                /* Load from the nested guest into our buffer */
                rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
                                                     eaddr, buf, NULL, n);
                if (rc)
                        goto not_found;

                /* Write what was loaded into our buffer back to the L1 guest */
                rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
                if (rc)
                        goto not_found;
        } else {
                /* Load the data to be stored from the L1 guest into our buf */
                rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
                if (rc)
                        goto not_found;

                /* Store from our buffer into the nested guest */
                rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
                                                     eaddr, NULL, buf, n);
                if (rc)
                        goto not_found;
        }

out_unlock:
        mutex_unlock(&gp->tlb_lock);
        kvmhv_put_nested(gp);
out_free:
        kfree(buf);
        return rc;
not_found:
        rc = H_NOT_FOUND;
        goto out_unlock;
}

/*
 * Reload the partition table entry for a guest.
 * Caller must hold gp->tlb_lock.
 */
static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
{
        int ret;
        struct patb_entry ptbl_entry;
        unsigned long ptbl_addr;
        struct kvm *kvm = gp->l1_host;

        ret = -EFAULT;
        ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
        if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8)))
                ret = kvm_read_guest(kvm, ptbl_addr,
                                     &ptbl_entry, sizeof(ptbl_entry));
        if (ret) {
                gp->l1_gr_to_hr = 0;
                gp->process_table = 0;
        } else {
                gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
                gp->process_table = be64_to_cpu(ptbl_entry.patb1);
        }
        kvmhv_set_nested_ptbl(gp);
}

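/*
 * Allocate the state for a nested guest with L1 lpid @lpid, including
 * a shadow page table and a freshly allocated host (shadow) lpid.
 */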
struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
        struct kvm_nested_guest *gp;
        long shadow_lpid;

        gp = kzalloc(sizeof(*gp), GFP_KERNEL);
        if (!gp)
                return NULL;
        gp->l1_host = kvm;
        gp->l1_lpid = lpid;
        mutex_init(&gp->tlb_lock);
        gp->shadow_pgtable = pgd_alloc(kvm->mm);
        if (!gp->shadow_pgtable)
                goto out_free;
        shadow_lpid = kvmppc_alloc_lpid();
        if (shadow_lpid < 0)
                goto out_free2;
        gp->shadow_lpid = shadow_lpid;
        gp->radix = 1;

        memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));

        return gp;

 out_free2:
        pgd_free(kvm->mm, gp->shadow_pgtable);
 out_free:
        kfree(gp);
        return NULL;
}

/*
 * Free up any resources allocated for a nested guest.
 */
static void kvmhv_release_nested(struct kvm_nested_guest *gp)
{
        struct kvm *kvm = gp->l1_host;

        if (gp->shadow_pgtable) {
                /*
                 * No vcpu is using this struct and no call to
                 * kvmhv_get_nested can find this struct,
                 * so we don't need to hold kvm->mmu_lock.
                 */
                kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
                                          gp->shadow_lpid);
                pgd_free(kvm->mm, gp->shadow_pgtable);
        }
        kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
        kvmppc_free_lpid(gp->shadow_lpid);
        kfree(gp);
}

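/*
 * Unhook @gp from the L1 lpid table, dropping the table's reference
 * and freeing @gp if that was the last one.
 */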
static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
{
        struct kvm *kvm = gp->l1_host;
        int lpid = gp->l1_lpid;
        long ref;

        spin_lock(&kvm->mmu_lock);
        if (gp == kvm->arch.nested_guests[lpid]) {
                kvm->arch.nested_guests[lpid] = NULL;
                if (lpid == kvm->arch.max_nested_lpid) {
                        while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
                                ;
                        kvm->arch.max_nested_lpid = lpid;
                }
                --gp->refcnt;
        }
        ref = gp->refcnt;
        spin_unlock(&kvm->mmu_lock);
        if (ref == 0)
                kvmhv_release_nested(gp);
}

/*
 * Free up all nested resources allocated for this guest.
 * This is called with no vcpus of the guest running, when
 * switching the guest to HPT mode or when destroying the
 * guest.
 */
void kvmhv_release_all_nested(struct kvm *kvm)
{
        int i;
        struct kvm_nested_guest *gp;
        struct kvm_nested_guest *freelist = NULL;
        struct kvm_memory_slot *memslot;
        int srcu_idx;

        spin_lock(&kvm->mmu_lock);
        for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
                gp = kvm->arch.nested_guests[i];
                if (!gp)
                        continue;
                kvm->arch.nested_guests[i] = NULL;
                if (--gp->refcnt == 0) {
                        gp->next = freelist;
                        freelist = gp;
                }
        }
        kvm->arch.max_nested_lpid = -1;
        spin_unlock(&kvm->mmu_lock);
        while ((gp = freelist) != NULL) {
                freelist = gp->next;
                kvmhv_release_nested(gp);
        }

        srcu_idx = srcu_read_lock(&kvm->srcu);
        kvm_for_each_memslot(memslot, kvm_memslots(kvm))
                kvmhv_free_memslot_nest_rmap(memslot);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/* caller must hold gp->tlb_lock */
static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
{
        struct kvm *kvm = gp->l1_host;

        spin_lock(&kvm->mmu_lock);
        kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
        spin_unlock(&kvm->mmu_lock);
        kvmhv_flush_lpid(gp->shadow_lpid);
        kvmhv_update_ptbl_cache(gp);
        if (gp->l1_gr_to_hr == 0)
                kvmhv_remove_nested(gp);
}

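/*
 * Find the nested guest for L1 lpid @l1_lpid and take a reference on
 * it; if @create is true, allocate it when it doesn't exist yet.
 * Returns NULL on failure.
 */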
struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
                                          bool create)
{
        struct kvm_nested_guest *gp, *newgp;

        if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
            l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
                return NULL;

        spin_lock(&kvm->mmu_lock);
        gp = kvm->arch.nested_guests[l1_lpid];
        if (gp)
                ++gp->refcnt;
        spin_unlock(&kvm->mmu_lock);

        if (gp || !create)
                return gp;

        newgp = kvmhv_alloc_nested(kvm, l1_lpid);
        if (!newgp)
                return NULL;
        spin_lock(&kvm->mmu_lock);
        if (kvm->arch.nested_guests[l1_lpid]) {
                /* someone else beat us to it */
                gp = kvm->arch.nested_guests[l1_lpid];
        } else {
                kvm->arch.nested_guests[l1_lpid] = newgp;
                ++newgp->refcnt;
                gp = newgp;
                newgp = NULL;
                if (l1_lpid > kvm->arch.max_nested_lpid)
                        kvm->arch.max_nested_lpid = l1_lpid;
        }
        ++gp->refcnt;
        spin_unlock(&kvm->mmu_lock);

        if (newgp)
                kvmhv_release_nested(newgp);

        return gp;
}

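/* Drop a reference on @gp, freeing it once the refcount reaches zero */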
void kvmhv_put_nested(struct kvm_nested_guest *gp)
{
        struct kvm *kvm = gp->l1_host;
        long ref;

        spin_lock(&kvm->mmu_lock);
        ref = --gp->refcnt;
        spin_unlock(&kvm->mmu_lock);
        if (ref == 0)
                kvmhv_release_nested(gp);
}

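/*
 * Look up a nested guest by L1 lpid without taking a reference;
 * callers rely on holding kvm->mmu_lock to keep the result stable.
 */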
static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
{
        if (lpid > kvm->arch.max_nested_lpid)
                return NULL;
        return kvm->arch.nested_guests[lpid];
}

pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
                                 unsigned long ea, unsigned *hshift)
{
        struct kvm_nested_guest *gp;
        pte_t *pte;

        gp = kvmhv_find_nested(kvm, lpid);
        if (!gp)
                return NULL;

        VM_WARN(!spin_is_locked(&kvm->mmu_lock),
                "%s called with kvm mmu_lock not held \n", __func__);
        pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);

        return pte;
}

static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
{
        return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
                                      RMAP_NESTED_GPA_MASK));
}

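/*
 * Add *n_rmap to the nested rmap list at @rmapp unless an equivalent
 * entry (same lpid and nested gpa) is already present. A single entry
 * is stored inline in *rmapp; further entries are chained as an llist.
 * *n_rmap is set to NULL once the entry has been consumed.
 */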
void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
                            struct rmap_nested **n_rmap)
{
        struct llist_node *entry = ((struct llist_head *) rmapp)->first;
        struct rmap_nested *cursor;
        u64 rmap, new_rmap = (*n_rmap)->rmap;

        /* Are there any existing entries? */
        if (!(*rmapp)) {
                /* No -> use the rmap as a single entry */
                *rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
                return;
        }

        /* Do any entries match what we're trying to insert? */
        for_each_nest_rmap_safe(cursor, entry, &rmap) {
                if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
                        return;
        }

        /* Do we need to create a list or just add the new entry? */
        rmap = *rmapp;
        if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
                *rmapp = 0UL;
        llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
        if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
                (*n_rmap)->list.next = (struct llist_node *) rmap;

        /* Set NULL so not freed by caller */
        *n_rmap = NULL;
}

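/*
 * For one nested rmap entry, update the rc bits in the corresponding
 * shadow pte if it still maps the same host page.
 */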
static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
                                      unsigned long clr, unsigned long set,
                                      unsigned long hpa, unsigned long mask)
{
        unsigned long gpa;
        unsigned int shift, lpid;
        pte_t *ptep;

        gpa = n_rmap & RMAP_NESTED_GPA_MASK;
        lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;

        /* Find the pte */
        ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
        /*
         * If the pte is present and the pfn is still the same, update the pte.
         * If the pfn has changed then this is a stale rmap entry, the nested
         * gpa actually points somewhere else now, and there is nothing to do.
         * XXX A future optimisation would be to remove the rmap entry here.
         */
        if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
                __radix_pte_update(ptep, clr, set);
                kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
        }
}

/*
 * For a given list of rmap entries, update the rc bits in all ptes in shadow
 * page tables for nested guests which are referenced by the rmap list.
 */
void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
                                    unsigned long clr, unsigned long set,
                                    unsigned long hpa, unsigned long nbytes)
{
        struct llist_node *entry = ((struct llist_head *) rmapp)->first;
        struct rmap_nested *cursor;
        unsigned long rmap, mask;

        if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
                return;

        mask = PTE_RPN_MASK & ~(nbytes - 1);
        hpa &= mask;

        for_each_nest_rmap_safe(cursor, entry, &rmap)
                kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
}

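/* Invalidate the shadow pte referenced by one nested rmap entry */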
static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
                                   unsigned long hpa, unsigned long mask)
{
        struct kvm_nested_guest *gp;
        unsigned long gpa;
        unsigned int shift, lpid;
        pte_t *ptep;

        gpa = n_rmap & RMAP_NESTED_GPA_MASK;
        lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
        gp = kvmhv_find_nested(kvm, lpid);
        if (!gp)
                return;

        /* Find and invalidate the pte */
        ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
        /* Don't spuriously invalidate ptes if the pfn has changed */
        if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
                kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
}

static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
                                        unsigned long hpa, unsigned long mask)
{
        struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
        struct rmap_nested *cursor;
        unsigned long rmap;

        for_each_nest_rmap_safe(cursor, entry, &rmap) {
                kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
                kfree(cursor);
        }
}

/* called with kvm->mmu_lock held */
void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
                                  const struct kvm_memory_slot *memslot,
                                  unsigned long gpa, unsigned long hpa,
                                  unsigned long nbytes)
{
        unsigned long gfn, end_gfn;
        unsigned long addr_mask;

        if (!memslot)
                return;
        gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
        end_gfn = gfn + (nbytes >> PAGE_SHIFT);

        addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
        hpa &= addr_mask;

        for (; gfn < end_gfn; gfn++) {
                unsigned long *rmap = &memslot->arch.rmap[gfn];
                kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
        }
}

static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
{
        unsigned long page;

        for (page = 0; page < free->npages; page++) {
                unsigned long rmap, *rmapp = &free->arch.rmap[page];
                struct rmap_nested *cursor;
                struct llist_node *entry;

                entry = llist_del_all((struct llist_head *) rmapp);
                for_each_nest_rmap_safe(cursor, entry, &rmap)
                        kfree(cursor);
        }
}

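/*
 * Invalidate the shadow pte (if any) for @gpa in @gp's shadow page
 * table. Returns true if a pte was present, and the page shift via
 * @shift_ret.
 */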
static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
                                        struct kvm_nested_guest *gp,
                                        long gpa, int *shift_ret)
{
        struct kvm *kvm = vcpu->kvm;
        bool ret = false;
        pte_t *ptep;
        int shift;

        spin_lock(&kvm->mmu_lock);
        ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
        if (!shift)
                shift = PAGE_SHIFT;
        if (ptep && pte_present(*ptep)) {
                kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
                ret = true;
        }
        spin_unlock(&kvm->mmu_lock);

        if (shift_ret)
                *shift_ret = shift;
        return ret;
}

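/* Extract the fields of a tlbie instruction and its RS/RB operands */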
static inline int get_ric(unsigned int instr)
{
        return (instr >> 18) & 0x3;
}

static inline int get_prs(unsigned int instr)
{
        return (instr >> 17) & 0x1;
}

static inline int get_r(unsigned int instr)
{
        return (instr >> 16) & 0x1;
}

static inline int get_lpid(unsigned long r_val)
{
        return r_val & 0xffffffff;
}

static inline int get_is(unsigned long r_val)
{
        return (r_val >> 10) & 0x3;
}

static inline int get_ap(unsigned long r_val)
{
        return (r_val >> 5) & 0x7;
}

static inline long get_epn(unsigned long r_val)
{
        return r_val >> 12;
}

static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
                                        int ap, long epn)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_nested_guest *gp;
        long npages;
        int shift, shadow_shift;
        unsigned long addr;

        shift = ap_to_shift(ap);
        addr = epn << 12;
        if (shift < 0)
                /* Invalid ap encoding */
                return -EINVAL;

        addr &= ~((1UL << shift) - 1);
        npages = 1UL << (shift - PAGE_SHIFT);

        gp = kvmhv_get_nested(kvm, lpid, false);
        if (!gp) /* No such guest -> nothing to do */
                return 0;
        mutex_lock(&gp->tlb_lock);

        /* There may be more than one host page backing this single guest pte */
        do {
                kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);

                npages -= 1UL << (shadow_shift - PAGE_SHIFT);
                addr += 1UL << shadow_shift;
        } while (npages > 0);

        mutex_unlock(&gp->tlb_lock);
        kvmhv_put_nested(gp);
        return 0;
}

static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
                                     struct kvm_nested_guest *gp, int ric)
{
        struct kvm *kvm = vcpu->kvm;

        mutex_lock(&gp->tlb_lock);
        switch (ric) {
        case 0:
                /* Invalidate TLB */
                spin_lock(&kvm->mmu_lock);
                kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
                                          gp->shadow_lpid);
                kvmhv_flush_lpid(gp->shadow_lpid);
                spin_unlock(&kvm->mmu_lock);
                break;
        case 1:
                /*
                 * Invalidate PWC
                 * We don't cache this -> nothing to do
                 */
                break;
        case 2:
                /* Invalidate TLB, PWC and caching of partition table entries */
                kvmhv_flush_nested(gp);
                break;
        default:
                break;
        }
        mutex_unlock(&gp->tlb_lock);
}

static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_nested_guest *gp;
        int i;

        spin_lock(&kvm->mmu_lock);
        for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
                gp = kvm->arch.nested_guests[i];
                if (gp) {
                        spin_unlock(&kvm->mmu_lock);
                        kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
                        spin_lock(&kvm->mmu_lock);
                }
        }
        spin_unlock(&kvm->mmu_lock);
}

static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
                                    unsigned long rsval, unsigned long rbval)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_nested_guest *gp;
        int r, ric, prs, is, ap;
        int lpid;
        long epn;
        int ret = 0;

        ric = get_ric(instr);
        prs = get_prs(instr);
        r = get_r(instr);
        lpid = get_lpid(rsval);
        is = get_is(rbval);

        /*
         * These cases are invalid and are not handled:
         * r   != 1 -> Only radix supported
         * prs == 1 -> Not HV privileged
         * ric == 3 -> No cluster bombs for radix
         * is  == 1 -> Partition scoped translations not associated with pid
         * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
         */
        if ((!r) || (prs) || (ric == 3) || (is == 1) ||
            ((!is) && (ric == 1 || ric == 2)))
                return -EINVAL;

        switch (is) {
        case 0:
                /*
                 * We know ric == 0
                 * Invalidate TLB for a given target address
                 */
                epn = get_epn(rbval);
                ap = get_ap(rbval);
                ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
                break;
        case 2:
                /* Invalidate matching LPID */
                gp = kvmhv_get_nested(kvm, lpid, false);
                if (gp) {
                        kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
                        kvmhv_put_nested(gp);
                }
                break;
        case 3:
                /* Invalidate ALL LPIDs */
                kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

/*
 * This handles the H_TLB_INVALIDATE hcall.
 * Parameters are (r4) tlbie instruction code, (r5) rS contents,
 * (r6) rB contents.
 */
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
{
        int ret;

        ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
                        kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
        if (ret)
                return H_PARAMETER;
        return H_SUCCESS;
}

/* Used to convert a nested guest real address to a L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
                                       struct kvm_nested_guest *gp,
                                       unsigned long n_gpa, unsigned long dsisr,
                                       struct kvmppc_pte *gpte_p)
{
        u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
        int ret;

        ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
                                         &fault_addr);

        if (ret) {
                /* We didn't find a pte */
                if (ret == -EINVAL) {
                        /* Unsupported mmu config */
                        flags |= DSISR_UNSUPP_MMU;
                } else if (ret == -ENOENT) {
                        /* No translation found */
                        flags |= DSISR_NOHPTE;
                } else if (ret == -EFAULT) {
                        /* Couldn't access L1 real address */
                        flags |= DSISR_PRTABLE_FAULT;
                        vcpu->arch.fault_gpa = fault_addr;
                } else {
                        /* Unknown error */
                        return ret;
                }
                goto forward_to_l1;
        } else {
                /* We found a pte -> check permissions */
                if (dsisr & DSISR_ISSTORE) {
                        /* Can we write? */
                        if (!gpte_p->may_write) {
                                flags |= DSISR_PROTFAULT;
                                goto forward_to_l1;
                        }
                } else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
                        /* Can we execute? */
                        if (!gpte_p->may_execute) {
                                flags |= SRR1_ISI_N_G_OR_CIP;
                                goto forward_to_l1;
                        }
                } else {
                        /* Can we read? */
                        if (!gpte_p->may_read && !gpte_p->may_write) {
                                flags |= DSISR_PROTFAULT;
                                goto forward_to_l1;
                        }
                }
        }

        return 0;

forward_to_l1:
        vcpu->arch.fault_dsisr = flags;
        if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
                vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
                vcpu->arch.shregs.msr |= flags;
        }
        return RESUME_HOST;
}

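/*
 * A fault was taken only because the reference/change bits needed to
 * be set: set rc in our (L0) pte for the L1 address and in the shadow
 * pte for the nested guest.
 */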
static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
                                       struct kvm_nested_guest *gp,
                                       unsigned long n_gpa,
                                       struct kvmppc_pte gpte,
                                       unsigned long dsisr)
{
        struct kvm *kvm = vcpu->kvm;
        bool writing = !!(dsisr & DSISR_ISSTORE);
        u64 pgflags;
        long ret;

        /* Are the rc bits set in the L1 partition scoped pte? */
        pgflags = _PAGE_ACCESSED;
        if (writing)
                pgflags |= _PAGE_DIRTY;
        if (pgflags & ~gpte.rc)
                return RESUME_HOST;

        spin_lock(&kvm->mmu_lock);
        /* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
        ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
                                      gpte.raddr, kvm->arch.lpid);
        if (!ret) {
                ret = -EINVAL;
                goto out_unlock;
        }

        /* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
        ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
                                      n_gpa, gp->shadow_lpid);
        if (!ret)
                ret = -EINVAL;
        else
                ret = 0;

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        return ret;
}

static inline int kvmppc_radix_level_to_shift(int level)
{
        switch (level) {
        case 2:
                return PUD_SHIFT;
        case 1:
                return PMD_SHIFT;
        default:
                return PAGE_SHIFT;
        }
}

static inline int kvmppc_radix_shift_to_level(int shift)
{
        if (shift == PUD_SHIFT)
                return 2;
        if (shift == PMD_SHIFT)
                return 1;
        if (shift == PAGE_SHIFT)
                return 0;
        WARN_ON_ONCE(1);
        return 0;
}

/* called with gp->tlb_lock held */
static long int __kvmhv_nested_page_fault(struct kvm_run *run,
                                          struct kvm_vcpu *vcpu,
                                          struct kvm_nested_guest *gp)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_memory_slot *memslot;
        struct rmap_nested *n_rmap;
        struct kvmppc_pte gpte;
        pte_t pte, *pte_p;
        unsigned long mmu_seq;
        unsigned long dsisr = vcpu->arch.fault_dsisr;
        unsigned long ea = vcpu->arch.fault_dar;
        unsigned long *rmapp;
        unsigned long n_gpa, gpa, gfn, perm = 0UL;
        unsigned int shift, l1_shift, level;
        bool writing = !!(dsisr & DSISR_ISSTORE);
        bool kvm_ro = false;
        long int ret;

        if (!gp->l1_gr_to_hr) {
                kvmhv_update_ptbl_cache(gp);
                if (!gp->l1_gr_to_hr)
                        return RESUME_HOST;
        }

        /* Convert the nested guest real address into a L1 guest real address */

        n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
        if (!(dsisr & DSISR_PRTABLE_FAULT))
                n_gpa |= ea & 0xFFF;
        ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);

        /*
         * If the hardware found a translation but we don't now have a usable
         * translation in the l1 partition-scoped tree, remove the shadow pte
         * and let the guest retry.
         */
        if (ret == RESUME_HOST &&
            (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
                      DSISR_BAD_COPYPASTE)))
                goto inval;
        if (ret)
                return ret;

        /* Failed to set the reference/change bits */
        if (dsisr & DSISR_SET_RC) {
                ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
                if (ret == RESUME_HOST)
                        return ret;
                if (ret)
                        goto inval;
                dsisr &= ~DSISR_SET_RC;
                if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
                               DSISR_PROTFAULT)))
                        return RESUME_GUEST;
        }

        /*
         * We took an HISI or HDSI while we were running a nested guest which
         * means we have no partition scoped translation for that. This means
         * we need to insert a pte for the mapping into our shadow_pgtable.
         */

        l1_shift = gpte.page_shift;
        if (l1_shift < PAGE_SHIFT) {
                /* We don't support l1 using a page size smaller than our own */
                pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
                       l1_shift, PAGE_SHIFT);
                return -EINVAL;
        }
        gpa = gpte.raddr;
        gfn = gpa >> PAGE_SHIFT;

        /* 1. Get the corresponding host memslot */

        memslot = gfn_to_memslot(kvm, gfn);
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
                        /* unusual error -> reflect to the guest as a DSI */
                        kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                        return RESUME_GUEST;
                }

                /* passthrough of emulated MMIO case */
                return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
        }
        if (memslot->flags & KVM_MEM_READONLY) {
                if (writing) {
                        /* Give the guest a DSI */
                        kvmppc_core_queue_data_storage(vcpu, ea,
                                        DSISR_ISSTORE | DSISR_PROTFAULT);
                        return RESUME_GUEST;
                }
                kvm_ro = true;
        }

        /* 2. Find the host pte for this L1 guest real address */

        /* Used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* See if can find translation in our partition scoped tables for L1 */
        pte = __pte(0);
        spin_lock(&kvm->mmu_lock);
        pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
        if (!shift)
                shift = PAGE_SHIFT;
        if (pte_p)
                pte = *pte_p;
        spin_unlock(&kvm->mmu_lock);

        if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
                /* No suitable pte found -> try to insert a mapping */
                ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
                                        writing, kvm_ro, &pte, &level);
                if (ret == -EAGAIN)
                        return RESUME_GUEST;
                else if (ret)
                        return ret;
                shift = kvmppc_radix_level_to_shift(level);
        }
        /* Align gfn to the start of the page */
        gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;

        /* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */

        /* The permissions are the combination of the host and l1 guest ptes */
        perm |= gpte.may_read ? 0UL : _PAGE_READ;
        perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
        perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
        /* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
        perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
        perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
        pte = __pte(pte_val(pte) & ~perm);

        /* What size pte can we insert? */
        if (shift > l1_shift) {
                u64 mask;
                unsigned int actual_shift = PAGE_SHIFT;
                if (PMD_SHIFT < l1_shift)
                        actual_shift = PMD_SHIFT;
                mask = (1UL << shift) - (1UL << actual_shift);
                pte = __pte(pte_val(pte) | (gpa & mask));
                shift = actual_shift;
        }
        level = kvmppc_radix_shift_to_level(shift);
        n_gpa &= ~((1UL << shift) - 1);

        /* 4. Insert the pte into our shadow_pgtable */

        n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
        if (!n_rmap)
                return RESUME_GUEST; /* Let the guest try again */
        n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
                (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
        rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
        ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
                                mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
        if (n_rmap)
                kfree(n_rmap);
        if (ret == -EAGAIN)
                ret = RESUME_GUEST; /* Let the guest try again */

        return ret;

 inval:
        kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
        return RESUME_GUEST;
}

long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        struct kvm_nested_guest *gp = vcpu->arch.nested;
        long int ret;

        mutex_lock(&gp->tlb_lock);
        ret = __kvmhv_nested_page_fault(run, vcpu, gp);
        mutex_unlock(&gp->tlb_lock);
        return ret;
}

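/* Return the next L1 lpid after @lpid that has a nested guest, or -1 */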
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
        int ret = -1;

        spin_lock(&kvm->mmu_lock);
        while (++lpid <= kvm->arch.max_nested_lpid) {
                if (kvm->arch.nested_guests[lpid]) {
                        ret = lpid;
                        break;
                }
        }
        spin_unlock(&kvm->mmu_lock);
        return ret;
}