// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

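/*
 * Context saved across a TLB invalidation: the only host state stashed
 * here is TCR_EL1, which the speculative AT workaround below modifies
 * while the guest's VMID is loaded.
 */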
struct tlb_inv_context {
	u64 tcr;
};

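/*
 * Save the host's translation regime and install the guest's stage-2
 * MMU. On CPUs affected by the speculative AT erratum, host stage-1
 * walks are first prevented from filling the TLB (via the
 * TCR_EL1.EPD{0,1} bits), so that no host translation can be cached
 * under the guest's VMID.
 */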
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM erratum 1319367, we
		 * need to avoid a host Stage-1 walk while we have the
		 * guest's VMID set in the VTTBR in order to invalidate
		 * TLBs. We're guaranteed that the S1 MMU is enabled, so
		 * we can simply set the EPD bits to avoid any further
		 * TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_guest_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_guest_stage2(mmu);
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

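/*
 * Restore the host's translation regime: zero VTTBR_EL2 to reinstate
 * the host VMID, then (on CPUs with the speculative AT workaround)
 * re-enable host stage-1 walks by restoring the saved TCR_EL1.
 */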
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	write_sysreg(0, vttbr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

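/*
 * Invalidate the stage-2 TLB entries covering @ipa in the VMID held by
 * @mmu, at the given level of the page-table walk where known. Since we
 * only have the IPA (not the guest VA), this also has to invalidate the
 * whole of stage-1 for that VMID; see the comments below.
 */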
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
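	/* The TLBI instructions take the address shifted right by 12 bits */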
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host(&cxt);
}

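/*
 * Invalidate all stage-1 and stage-2 TLB entries for the VMID held by
 * @mmu, across all CPUs in the Inner Shareable domain.
 */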
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

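/*
 * Local (this CPU only) variant of the above: the TLBI is not
 * broadcast, and is only ordered by a non-shareable DSB. The caller
 * passes a kernel pointer, hence the kern_hyp_va() conversion before
 * the hyp code dereferences it.
 */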
void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	mmu = kern_hyp_va(mmu);
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

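/*
 * Invalidate all stage-1 and stage-2 TLB entries, for all VMIDs, on all
 * CPUs in the Inner Shareable domain. Used on VMID rollover, when stale
 * entries for any recycled VMID must be gone.
 */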
void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}