// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

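/*
 * Per-invalidation context saved by __tlb_switch_to_guest() and restored
 * by __tlb_switch_to_host(): only the host's TCR_EL1 needs stashing, and
 * only when the SPECULATIVE_AT workaround forces the EPD bits on.
 */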
struct tlb_inv_context {
	u64 tcr;
};

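/*
 * Switch the translation regime over to the guest identified by @mmu, so
 * that the TLBIs issued afterwards operate on the guest's VMID. On CPUs
 * affected by ARM erratum 1319367, host Stage-1 walks are suppressed
 * first by setting the TCR_EL1 EPD bits.
 */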
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM 1319367, we need to
		 * avoid a host Stage-1 walk while we have the guest's
		 * VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the S1 MMU is enabled, so we can
		 * simply set the EPD bits to avoid any further TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_guest_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_guest_stage2(mmu);
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

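/*
 * Switch back to the host's translation regime: clear VTTBR_EL2 and, if
 * the SPECULATIVE_AT workaround is in effect, restore the saved TCR_EL1
 * so that host Stage-1 walks can resume.
 */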
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	write_sysreg(0, vttbr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

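/*
 * Invalidate the Stage-2 TLB entries covering @ipa for the VMID held in
 * @mmu, plus any cached Stage-1 walks built on top of them. @level is
 * forwarded as a TTL hint where the CPU supports it.
 */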
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host(&cxt);
}

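/*
 * Invalidate all Stage-1 and Stage-2 TLB entries for the VMID held in
 * @mmu, across the whole Inner Shareable domain.
 */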
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

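/*
 * Invalidate the Stage-1 TLB entries for the VMID held in @mmu on the
 * local CPU only, hence the non-shareable barrier. Note that @mmu
 * arrives as a kernel address and is converted with kern_hyp_va().
 */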
void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	mmu = kern_hyp_va(mmu);
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

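/*
 * Nuke the TLB entries of every VMID (TLBI ALLE1IS), typically on a VMID
 * generation rollover, and deal with the VPIPT I-cache as described
 * below.
 */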
void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}