// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>

#include "trace.h"

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static pgd_t *merged_hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static unsigned long io_map_base;

#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))

#define KVM_S2PTE_FLAG_IS_IOMAP		(1UL << 0)
#define KVM_S2_FLAG_LOGGING_ACTIVE	(1UL << 1)

static bool is_iomap(unsigned long flags)
{
	return flags & KVM_S2PTE_FLAG_IS_IOMAP;
}

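/*
 * A memslot is being dirty-logged when it has a dirty bitmap attached and is
 * not mapped read-only; such slots are mapped at page granularity so that
 * individual guest writes can be tracked.
 */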
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm:	pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
}

static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				   int level)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
}

/*
 * D-Cache management functions. They take the page table entries by
 * value, as they are flushing the cache using the kernel mapping (or
 * kmap on 32bit).
 */
static void kvm_flush_dcache_pte(pte_t pte)
{
	__kvm_flush_dcache_pte(pte);
}

static void kvm_flush_dcache_pmd(pmd_t pmd)
{
	__kvm_flush_dcache_pmd(pmd);
}

static void kvm_flush_dcache_pud(pud_t pud)
{
	__kvm_flush_dcache_pud(pud);
}

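/*
 * Pages without a struct page backing them (!pfn_valid()) are treated as
 * device memory, and are therefore exempt from D-cache maintenance.
 */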
static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_valid(pfn);
}

/**
 * stage2_dissolve_pmd() - clear and flush huge PMD entry
 * @mmu:	pointer to mmu structure to operate on
 * @addr:	IPA
 * @pmd:	pmd pointer for IPA
 *
 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
 */
static void stage2_dissolve_pmd(struct kvm_s2_mmu *mmu, phys_addr_t addr, pmd_t *pmd)
{
	if (!pmd_thp_or_huge(*pmd))
		return;

	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
	put_page(virt_to_page(pmd));
}

/**
 * stage2_dissolve_pud() - clear and flush huge PUD entry
 * @mmu:	pointer to mmu structure to operate on
 * @addr:	IPA
 * @pud:	pud pointer for IPA
 *
 * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
 */
static void stage2_dissolve_pud(struct kvm_s2_mmu *mmu, phys_addr_t addr, pud_t *pudp)
{
	struct kvm *kvm = mmu->kvm;

	if (!stage2_pud_huge(kvm, *pudp))
		return;

	stage2_pud_clear(kvm, pudp);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
	put_page(virt_to_page(pudp));
}

static void clear_stage2_pgd_entry(struct kvm_s2_mmu *mmu, pgd_t *pgd, phys_addr_t addr)
{
	struct kvm *kvm = mmu->kvm;
	p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
	stage2_pgd_clear(kvm, pgd);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
	stage2_p4d_free(kvm, p4d_table);
	put_page(virt_to_page(pgd));
}

static void clear_stage2_p4d_entry(struct kvm_s2_mmu *mmu, p4d_t *p4d, phys_addr_t addr)
{
	struct kvm *kvm = mmu->kvm;
	pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0);
	stage2_p4d_clear(kvm, p4d);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
	stage2_pud_free(kvm, pud_table);
	put_page(virt_to_page(p4d));
}

static void clear_stage2_pud_entry(struct kvm_s2_mmu *mmu, pud_t *pud, phys_addr_t addr)
{
	struct kvm *kvm = mmu->kvm;
	pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);

	VM_BUG_ON(stage2_pud_huge(kvm, *pud));
	stage2_pud_clear(kvm, pud);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
	stage2_pmd_free(kvm, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_stage2_pmd_entry(struct kvm_s2_mmu *mmu, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	VM_BUG_ON(pmd_thp_or_huge(*pmd));
	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
	free_page((unsigned long)pte_table);
	put_page(virt_to_page(pmd));
}

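/*
 * Page table entries are updated with a single copy (WRITE_ONCE) followed by
 * a dsb(ishst), so that the new entry is visible to the table walker before
 * any subsequent TLB or cache maintenance relies on it.
 */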
static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
{
	WRITE_ONCE(*ptep, new_pte);
	dsb(ishst);
}

static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
{
	WRITE_ONCE(*pmdp, new_pmd);
	dsb(ishst);
}

static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
{
	kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
}

static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
{
	WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
	dsb(ishst);
}

static inline void kvm_p4d_populate(p4d_t *p4dp, pud_t *pudp)
{
	WRITE_ONCE(*p4dp, kvm_mk_p4d(pudp));
	dsb(ishst);
}

static inline void kvm_pgd_populate(pgd_t *pgdp, p4d_t *p4dp)
{
#ifndef __PAGETABLE_P4D_FOLDED
	WRITE_ONCE(*pgdp, kvm_mk_pgd(p4dp));
	dsb(ishst);
#endif
}

/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM. However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
 * the IO subsystem will never hit in the cache.
 *
 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
 * we then fully enforce cacheability of RAM, no matter what the guest
 * does.
 */
static void unmap_stage2_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			pte_t old_pte = *pte;

			kvm_set_pte(pte, __pte(0));
			kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);

			/* No need to invalidate the cache for device mappings */
			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
				kvm_flush_dcache_pte(old_pte);

			put_page(virt_to_page(pte));
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (stage2_pte_table_empty(mmu->kvm, start_pte))
		clear_stage2_pmd_entry(mmu, pmd, start_addr);
}

static void unmap_stage2_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	phys_addr_t next, start_addr = addr;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
	do {
		next = stage2_pmd_addr_end(kvm, addr, end);
		if (!pmd_none(*pmd)) {
			if (pmd_thp_or_huge(*pmd)) {
				pmd_t old_pmd = *pmd;

				pmd_clear(pmd);
				kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);

				kvm_flush_dcache_pmd(old_pmd);

				put_page(virt_to_page(pmd));
			} else {
				unmap_stage2_ptes(mmu, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);

	if (stage2_pmd_table_empty(kvm, start_pmd))
		clear_stage2_pud_entry(mmu, pud, start_addr);
}

static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	phys_addr_t next, start_addr = addr;
	pud_t *pud, *start_pud;

	start_pud = pud = stage2_pud_offset(kvm, p4d, addr);
	do {
		next = stage2_pud_addr_end(kvm, addr, end);
		if (!stage2_pud_none(kvm, *pud)) {
			if (stage2_pud_huge(kvm, *pud)) {
				pud_t old_pud = *pud;

				stage2_pud_clear(kvm, pud);
				kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
				kvm_flush_dcache_pud(old_pud);
				put_page(virt_to_page(pud));
			} else {
				unmap_stage2_pmds(mmu, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);

	if (stage2_pud_table_empty(kvm, start_pud))
		clear_stage2_p4d_entry(mmu, p4d, start_addr);
}

static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	phys_addr_t next, start_addr = addr;
	p4d_t *p4d, *start_p4d;

	start_p4d = p4d = stage2_p4d_offset(kvm, pgd, addr);
	do {
		next = stage2_p4d_addr_end(kvm, addr, end);
		if (!stage2_p4d_none(kvm, *p4d))
			unmap_stage2_puds(mmu, p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	if (stage2_p4d_table_empty(kvm, start_p4d))
		clear_stage2_pgd_entry(mmu, pgd, start_addr);
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @mmu:   The KVM stage-2 MMU pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
{
	struct kvm *kvm = mmu->kvm;
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	assert_spin_locked(&kvm->mmu_lock);
	WARN_ON(size & ~PAGE_MASK);

	pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
	do {
		/*
		 * Make sure the page table is still active, as another thread
		 * could have possibly freed the page table, while we released
		 * the lock.
		 */
		if (!READ_ONCE(mmu->pgd))
			break;
		next = stage2_pgd_addr_end(kvm, addr, end);
		if (!stage2_pgd_none(kvm, *pgd))
			unmap_stage2_p4ds(mmu, pgd, addr, next);
		/*
		 * If the range is too large, release the kvm->mmu_lock
		 * to prevent starvation and lockup detector warnings.
		 */
		if (next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (pgd++, addr = next, addr != end);
}

static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
			kvm_flush_dcache_pte(*pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void stage2_flush_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	pmd_t *pmd;
	phys_addr_t next;

	pmd = stage2_pmd_offset(kvm, pud, addr);
	do {
		next = stage2_pmd_addr_end(kvm, addr, end);
		if (!pmd_none(*pmd)) {
			if (pmd_thp_or_huge(*pmd))
				kvm_flush_dcache_pmd(*pmd);
			else
				stage2_flush_ptes(mmu, pmd, addr, next);
		}
	} while (pmd++, addr = next, addr != end);
}

static void stage2_flush_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	pud_t *pud;
	phys_addr_t next;

	pud = stage2_pud_offset(kvm, p4d, addr);
	do {
		next = stage2_pud_addr_end(kvm, addr, end);
		if (!stage2_pud_none(kvm, *pud)) {
			if (stage2_pud_huge(kvm, *pud))
				kvm_flush_dcache_pud(*pud);
			else
				stage2_flush_pmds(mmu, pud, addr, next);
		}
	} while (pud++, addr = next, addr != end);
}

static void stage2_flush_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = mmu->kvm;
	p4d_t *p4d;
	phys_addr_t next;

	p4d = stage2_p4d_offset(kvm, pgd, addr);
	do {
		next = stage2_p4d_addr_end(kvm, addr, end);
		if (!stage2_p4d_none(kvm, *p4d))
			stage2_flush_puds(mmu, p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
	phys_addr_t next;
	pgd_t *pgd;

	pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
	do {
		next = stage2_pgd_addr_end(kvm, addr, end);
		if (!stage2_pgd_none(kvm, *pgd))
			stage2_flush_p4ds(mmu, pgd, addr, next);

		if (next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (pgd++, addr = next, addr != end);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
static void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

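/*
 * The clear_hyp_*_entry() helpers below tear down one level of the HYP page
 * tables: they clear the entry, free the now-unused lower-level table and
 * drop the reference held on the page containing the cleared entry.
 */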
static void clear_hyp_pgd_entry(pgd_t *pgd)
{
	p4d_t *p4d_table __maybe_unused = p4d_offset(pgd, 0UL);
	pgd_clear(pgd);
	p4d_free(NULL, p4d_table);
	put_page(virt_to_page(pgd));
}

static void clear_hyp_p4d_entry(p4d_t *p4d)
{
	pud_t *pud_table __maybe_unused = pud_offset(p4d, 0UL);
	VM_BUG_ON(p4d_huge(*p4d));
	p4d_clear(p4d);
	pud_free(NULL, pud_table);
	put_page(virt_to_page(p4d));
}

static void clear_hyp_pud_entry(pud_t *pud)
{
	pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
	VM_BUG_ON(pud_huge(*pud));
	pud_clear(pud);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_hyp_pmd_entry(pmd_t *pmd)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	VM_BUG_ON(pmd_thp_or_huge(*pmd));
	pmd_clear(pmd);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			kvm_set_pte(pte, __pte(0));
			put_page(virt_to_page(pte));
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (hyp_pte_table_empty(start_pte))
		clear_hyp_pmd_entry(pmd);
}

static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* Hyp doesn't use huge pmds */
		if (!pmd_none(*pmd))
			unmap_hyp_ptes(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	if (hyp_pmd_table_empty(start_pmd))
		clear_hyp_pud_entry(pud);
}

static void unmap_hyp_puds(p4d_t *p4d, phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next;
	pud_t *pud, *start_pud;

	start_pud = pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		/* Hyp doesn't use huge puds */
		if (!pud_none(*pud))
			unmap_hyp_pmds(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	if (hyp_pud_table_empty(start_pud))
		clear_hyp_p4d_entry(p4d);
}

static void unmap_hyp_p4ds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next;
	p4d_t *p4d, *start_p4d;

	start_p4d = p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		/* Hyp doesn't use huge p4ds */
		if (!p4d_none(*p4d))
			unmap_hyp_puds(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	if (hyp_p4d_table_empty(start_p4d))
		clear_hyp_pgd_entry(pgd);
}

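/*
 * Index into a hyp PGD. The number of top-level entries is passed in because
 * the idmap page tables may use a different number of PGD entries
 * (__kvm_idmap_ptrs_per_pgd()) from the regular hyp tables (PTRS_PER_PGD).
 */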
static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
{
	return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
}

static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
			      phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	/*
	 * We don't unmap anything from HYP, except at the hyp tear down.
	 * Hence, we don't have to invalidate the TLBs here.
	 */
	pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
	do {
		next = pgd_addr_end(addr, end);
		if (!pgd_none(*pgd))
			unmap_hyp_p4ds(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
{
	__unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
}

static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
{
	__unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the idmap range.
 *
 * boot_hyp_pgd should only map the idmap range, and is only used in
 * the extended idmap case.
 */
void free_hyp_pgds(void)
{
	pgd_t *id_pgd;

	mutex_lock(&kvm_hyp_pgd_mutex);

	id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;

	if (id_pgd) {
		/* In case we never called hyp_mmu_init() */
		if (!io_map_base)
			io_map_base = hyp_idmap_start;
		unmap_hyp_idmap_range(id_pgd, io_map_base,
				      hyp_idmap_start + PAGE_SIZE - io_map_base);
	}

	if (boot_hyp_pgd) {
		free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd) {
		unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
				(uintptr_t)high_memory - PAGE_OFFSET);

		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
		hyp_pgd = NULL;
	}
	if (merged_hyp_pgd) {
		clear_page(merged_hyp_pgd);
		free_page((unsigned long)merged_hyp_pgd);
		merged_hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

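/*
 * The create_hyp_*_mappings() helpers below each walk one level of the HYP
 * page tables, allocating the next level when it is missing, and install
 * mappings for [start, end) starting at physical frame @pfn with the given
 * protection.
 */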
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			kvm_pmd_populate(pmd, pte);
			get_page(virt_to_page(pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int create_hyp_pud_mappings(p4d_t *p4d, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int ret;

	addr = start;
	do {
		pud = pud_offset(p4d, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				return -ENOMEM;
			}
			kvm_pud_populate(pud, pmd);
			get_page(virt_to_page(pud));
		}

		next = pud_addr_end(addr, end);
		ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (ret)
			return ret;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int create_hyp_p4d_mappings(pgd_t *pgd, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	p4d_t *p4d;
	pud_t *pud;
	unsigned long addr, next;
	int ret;

	addr = start;
	do {
		p4d = p4d_offset(pgd, addr);

		if (p4d_none(*p4d)) {
			pud = pud_alloc_one(NULL, addr);
			if (!pud) {
				kvm_err("Cannot allocate Hyp pud\n");
				return -ENOMEM;
			}
			kvm_p4d_populate(p4d, pud);
			get_page(virt_to_page(p4d));
		}

		next = p4d_addr_end(addr, end);
		ret = create_hyp_pud_mappings(p4d, addr, next, pfn, prot);
		if (ret)
			return ret;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);

		if (pgd_none(*pgd)) {
			p4d = p4d_alloc_one(NULL, addr);
			if (!p4d) {
				kvm_err("Cannot allocate Hyp p4d\n");
				err = -ENOMEM;
				goto out;
			}
			kvm_pgd_populate(pgd, p4d);
			get_page(virt_to_page(pgd));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_p4d_mappings(pgd, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

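/*
 * Resolve a kernel virtual address to a physical address, handling both
 * linear-map addresses (__pa) and vmalloc addresses (via vmalloc_to_page).
 */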
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 * @prot:	The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to, pgprot_t prot)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = kern_hyp_va((unsigned long)from);
	unsigned long end = kern_hyp_va((unsigned long)to);

	if (is_kernel_in_hyp_mode())
		return 0;

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
					    virt_addr, virt_addr + PAGE_SIZE,
					    __phys_to_pfn(phys_addr),
					    prot);
		if (err)
			return err;
	}

	return 0;
}

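/*
 * Carve a private HYP VA range out of the "IO" region that grows downwards
 * from the idmap page, and map @phys_addr there with the requested
 * protection. On success, the chosen HYP VA is returned through @haddr.
 */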
Marc Zyngier | dc2e463 | 2018-02-13 11:00:29 +0000 | [diff] [blame] | 870 | static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size, |
| 871 | unsigned long *haddr, pgprot_t prot) |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 872 | { |
Marc Zyngier | e3f019b | 2017-12-04 17:04:38 +0000 | [diff] [blame] | 873 | pgd_t *pgd = hyp_pgd; |
| 874 | unsigned long base; |
| 875 | int ret = 0; |
Marc Zyngier | 6060df8 | 2013-04-12 19:12:01 +0100 | [diff] [blame] | 876 | |
Marc Zyngier | e3f019b | 2017-12-04 17:04:38 +0000 | [diff] [blame] | 877 | mutex_lock(&kvm_hyp_pgd_mutex); |
Marc Zyngier | 6060df8 | 2013-04-12 19:12:01 +0100 | [diff] [blame] | 878 | |
Marc Zyngier | e3f019b | 2017-12-04 17:04:38 +0000 | [diff] [blame] | 879 | /* |
Fuad Tabba | 656012c | 2020-04-01 15:03:10 +0100 | [diff] [blame] | 880 | * This assumes that we have enough space below the idmap |
Marc Zyngier | e3f019b | 2017-12-04 17:04:38 +0000 | [diff] [blame] | 881 | * page to allocate our VAs. If not, the check below will |
| 882 | * kick. A potential alternative would be to detect that |
| 883 | * overflow and switch to an allocation above the idmap. |
| 884 | * |
| 885 | * The allocated size is always a multiple of PAGE_SIZE. |
| 886 | */ |
| 887 | size = PAGE_ALIGN(size + offset_in_page(phys_addr)); |
| 888 | base = io_map_base - size; |
Marc Zyngier | 1bb32a4 | 2017-12-04 16:43:23 +0000 | [diff] [blame] | 889 | |
Marc Zyngier | e3f019b | 2017-12-04 17:04:38 +0000 | [diff] [blame] | 890 | /* |
| 891 | * Verify that BIT(VA_BITS - 1) hasn't been flipped by |
| 892 | * allocating the new area, as it would indicate we've |
| 893 | * overflowed the idmap/IO address range. |
| 894 | */ |
| 895 | if ((base ^ io_map_base) & BIT(VA_BITS - 1)) |
| 896 | ret = -ENOMEM; |
| 897 | else |
| 898 | io_map_base = base; |
| 899 | |
| 900 | mutex_unlock(&kvm_hyp_pgd_mutex); |
| 901 | |
| 902 | if (ret) |
| 903 | goto out; |
| 904 | |
| 905 | if (__kvm_cpu_uses_extended_idmap()) |
| 906 | pgd = boot_hyp_pgd; |
| 907 | |
| 908 | ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(), |
| 909 | base, base + size, |
Marc Zyngier | dc2e463 | 2018-02-13 11:00:29 +0000 | [diff] [blame] | 910 | __phys_to_pfn(phys_addr), prot); |
Marc Zyngier | e3f019b | 2017-12-04 17:04:38 +0000 | [diff] [blame] | 911 | if (ret) |
| 912 | goto out; |
| 913 | |
Marc Zyngier | dc2e463 | 2018-02-13 11:00:29 +0000 | [diff] [blame] | 914 | *haddr = base + offset_in_page(phys_addr); |
Marc Zyngier | e3f019b | 2017-12-04 17:04:38 +0000 | [diff] [blame] | 915 | |
| 916 | out: |
Marc Zyngier | dc2e463 | 2018-02-13 11:00:29 +0000 | [diff] [blame] | 917 | return ret; |
| 918 | } |
| 919 | |
| 920 | /** |
| 921 | * create_hyp_io_mappings - Map IO into both kernel and HYP |
| 922 | * @phys_addr: The physical start address which gets mapped |
| 923 | * @size: Size of the region being mapped |
| 924 | * @kaddr: Kernel VA for this mapping |
| 925 | * @haddr: HYP VA for this mapping |
| 926 | */ |
| 927 | int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, |
| 928 | void __iomem **kaddr, |
| 929 | void __iomem **haddr) |
| 930 | { |
| 931 | unsigned long addr; |
| 932 | int ret; |
| 933 | |
| 934 | *kaddr = ioremap(phys_addr, size); |
| 935 | if (!*kaddr) |
| 936 | return -ENOMEM; |
| 937 | |
| 938 | if (is_kernel_in_hyp_mode()) { |
| 939 | *haddr = *kaddr; |
| 940 | return 0; |
| 941 | } |
| 942 | |
| 943 | ret = __create_hyp_private_mapping(phys_addr, size, |
| 944 | &addr, PAGE_HYP_DEVICE); |
Marc Zyngier | 1bb32a4 | 2017-12-04 16:43:23 +0000 | [diff] [blame] | 945 | if (ret) { |
| 946 | iounmap(*kaddr); |
| 947 | *kaddr = NULL; |
Marc Zyngier | dc2e463 | 2018-02-13 11:00:29 +0000 | [diff] [blame] | 948 | *haddr = NULL; |
Marc Zyngier | 1bb32a4 | 2017-12-04 16:43:23 +0000 | [diff] [blame] | 949 | return ret; |
| 950 | } |
| 951 | |
Marc Zyngier | dc2e463 | 2018-02-13 11:00:29 +0000 | [diff] [blame] | 952 | *haddr = (void __iomem *)addr; |
| 953 | return 0; |
| 954 | } |
| 955 | |
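/*
 * Hedged usage sketch (editorial note, not part of the original file): a
 * typical caller maps a device region once at init time and keeps both
 * cookies around. The physical address and size are made-up placeholders.
 *
 *	void __iomem *kaddr, *haddr;
 *	int ret;
 *
 *	ret = create_hyp_io_mappings(0x08010000UL, SZ_64K, &kaddr, &haddr);
 *	if (ret)
 *		return ret;
 *	// kaddr is usable from kernel context, haddr from the EL2 code;
 *	// with VHE (is_kernel_in_hyp_mode()) the two are the same pointer.
 *
 * The HYP side comes out of the private VA range managed by
 * __create_hyp_private_mapping(), so callers generally treat it as a
 * permanent mapping.
 */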
| 956 | /** |
| 957 | * create_hyp_exec_mappings - Map an executable range into HYP |
| 958 | * @phys_addr: The physical start address which gets mapped |
| 959 | * @size: Size of the region being mapped |
| 960 | * @haddr: HYP VA for this mapping |
| 961 | */ |
| 962 | int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, |
| 963 | void **haddr) |
| 964 | { |
| 965 | unsigned long addr; |
| 966 | int ret; |
| 967 | |
| 968 | BUG_ON(is_kernel_in_hyp_mode()); |
| 969 | |
| 970 | ret = __create_hyp_private_mapping(phys_addr, size, |
| 971 | &addr, PAGE_HYP_EXEC); |
| 972 | if (ret) { |
| 973 | *haddr = NULL; |
| 974 | return ret; |
| 975 | } |
| 976 | |
| 977 | *haddr = (void *)addr; |
Marc Zyngier | 1bb32a4 | 2017-12-04 16:43:23 +0000 | [diff] [blame] | 978 | return 0; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 979 | } |
| 980 | |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 981 | /** |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 982 |  * kvm_init_stage2_mmu - Initialise an S2 MMU structure |
| 983 | * @kvm: The pointer to the KVM structure |
| 984 | * @mmu: The pointer to the s2 MMU structure |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 985 | * |
Zenghui Yu | 8324c3d | 2019-03-25 08:02:05 +0000 | [diff] [blame] | 986 | * Allocates only the stage-2 HW PGD level table(s) of size defined by |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 987 | * stage2_pgd_size(mmu->kvm). |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 988 | * |
| 989 | * Note we don't need locking here as this is only called when the VM is |
| 990 | * created, which can only be done once. |
| 991 | */ |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 992 | int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 993 | { |
Christoffer Dall | e329fb7 | 2018-12-11 15:26:31 +0100 | [diff] [blame] | 994 | phys_addr_t pgd_phys; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 995 | pgd_t *pgd; |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 996 | int cpu; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 997 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 998 | if (mmu->pgd != NULL) { |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 999 | kvm_err("kvm_arch already initialized?\n"); |
| 1000 | return -EINVAL; |
| 1001 | } |
| 1002 | |
Suzuki K Poulose | 9163ee23 | 2016-03-22 17:01:21 +0000 | [diff] [blame] | 1003 | /* Allocate the HW PGD, making sure that each page gets its own refcount */ |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1004 | pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO); |
Suzuki K Poulose | 9163ee23 | 2016-03-22 17:01:21 +0000 | [diff] [blame] | 1005 | if (!pgd) |
Marc Zyngier | a987370 | 2015-03-10 19:06:59 +0000 | [diff] [blame] | 1006 | return -ENOMEM; |
| 1007 | |
Christoffer Dall | e329fb7 | 2018-12-11 15:26:31 +0100 | [diff] [blame] | 1008 | pgd_phys = virt_to_phys(pgd); |
| 1009 | 	if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm))) { |
| | 		/* Don't leak the PGD pages on this unexpected failure */ |
| | 		free_pages_exact(pgd, stage2_pgd_size(kvm)); |
| 1010 | 		return -EINVAL; |
| | 	} |
| 1011 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1012 | mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran)); |
| 1013 | if (!mmu->last_vcpu_ran) { |
| 1014 | free_pages_exact(pgd, stage2_pgd_size(kvm)); |
| 1015 | return -ENOMEM; |
| 1016 | } |
| 1017 | |
| 1018 | for_each_possible_cpu(cpu) |
| 1019 | *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1; |
| 1020 | |
| 1021 | mmu->kvm = kvm; |
| 1022 | mmu->pgd = pgd; |
| 1023 | mmu->pgd_phys = pgd_phys; |
| 1024 | mmu->vmid.vmid_gen = 0; |
| 1025 | |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1026 | return 0; |
| 1027 | } |
| 1028 | |
Christoffer Dall | 957db10 | 2014-11-27 10:35:03 +0100 | [diff] [blame] | 1029 | static void stage2_unmap_memslot(struct kvm *kvm, |
| 1030 | struct kvm_memory_slot *memslot) |
| 1031 | { |
| 1032 | hva_t hva = memslot->userspace_addr; |
| 1033 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; |
| 1034 | phys_addr_t size = PAGE_SIZE * memslot->npages; |
| 1035 | hva_t reg_end = hva + size; |
| 1036 | |
| 1037 | /* |
| 1038 | * A memory region could potentially cover multiple VMAs, and any holes |
| 1039 | * between them, so iterate over all of them to find out if we should |
| 1040 | * unmap any of them. |
| 1041 | * |
| 1042 | * +--------------------------------------------+ |
| 1043 | * +---------------+----------------+ +----------------+ |
| 1044 | * | : VMA 1 | VMA 2 | | VMA 3 : | |
| 1045 | * +---------------+----------------+ +----------------+ |
| 1046 | * | memory region | |
| 1047 | * +--------------------------------------------+ |
| 1048 | */ |
| 1049 | do { |
| 1050 | struct vm_area_struct *vma = find_vma(current->mm, hva); |
| 1051 | hva_t vm_start, vm_end; |
| 1052 | |
| 1053 | if (!vma || vma->vm_start >= reg_end) |
| 1054 | break; |
| 1055 | |
| 1056 | /* |
| 1057 | * Take the intersection of this VMA with the memory region |
| 1058 | */ |
| 1059 | vm_start = max(hva, vma->vm_start); |
| 1060 | vm_end = min(reg_end, vma->vm_end); |
| 1061 | |
| 1062 | if (!(vma->vm_flags & VM_PFNMAP)) { |
| 1063 | gpa_t gpa = addr + (vm_start - memslot->userspace_addr); |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1064 | unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start); |
Christoffer Dall | 957db10 | 2014-11-27 10:35:03 +0100 | [diff] [blame] | 1065 | } |
| 1066 | hva = vm_end; |
| 1067 | } while (hva < reg_end); |
| 1068 | } |
| 1069 | |
| 1070 | /** |
| 1071 | * stage2_unmap_vm - Unmap Stage-2 RAM mappings |
| 1072 | * @kvm: The struct kvm pointer |
| 1073 | * |
Fuad Tabba | 656012c | 2020-04-01 15:03:10 +0100 | [diff] [blame] | 1074 | * Go through the memregions and unmap any regular RAM |
Christoffer Dall | 957db10 | 2014-11-27 10:35:03 +0100 | [diff] [blame] | 1075 | * backing memory already mapped to the VM. |
| 1076 | */ |
| 1077 | void stage2_unmap_vm(struct kvm *kvm) |
| 1078 | { |
| 1079 | struct kvm_memslots *slots; |
| 1080 | struct kvm_memory_slot *memslot; |
| 1081 | int idx; |
| 1082 | |
| 1083 | idx = srcu_read_lock(&kvm->srcu); |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 1084 | mmap_read_lock(current->mm); |
Christoffer Dall | 957db10 | 2014-11-27 10:35:03 +0100 | [diff] [blame] | 1085 | spin_lock(&kvm->mmu_lock); |
| 1086 | |
| 1087 | slots = kvm_memslots(kvm); |
| 1088 | kvm_for_each_memslot(memslot, slots) |
| 1089 | stage2_unmap_memslot(kvm, memslot); |
| 1090 | |
| 1091 | spin_unlock(&kvm->mmu_lock); |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 1092 | mmap_read_unlock(current->mm); |
Christoffer Dall | 957db10 | 2014-11-27 10:35:03 +0100 | [diff] [blame] | 1093 | srcu_read_unlock(&kvm->srcu, idx); |
| 1094 | } |
| 1095 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1096 | void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1097 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1098 | struct kvm *kvm = mmu->kvm; |
Suzuki K Poulose | 6c0d706 | 2017-05-03 15:17:51 +0100 | [diff] [blame] | 1099 | void *pgd = NULL; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1100 | |
Suzuki K Poulose | 8b3405e | 2017-04-03 15:12:43 +0100 | [diff] [blame] | 1101 | spin_lock(&kvm->mmu_lock); |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1102 | if (mmu->pgd) { |
| 1103 | unmap_stage2_range(mmu, 0, kvm_phys_size(kvm)); |
| 1104 | pgd = READ_ONCE(mmu->pgd); |
| 1105 | mmu->pgd = NULL; |
Suzuki K Poulose | 6c0d706 | 2017-05-03 15:17:51 +0100 | [diff] [blame] | 1106 | } |
Suzuki K Poulose | 8b3405e | 2017-04-03 15:12:43 +0100 | [diff] [blame] | 1107 | spin_unlock(&kvm->mmu_lock); |
| 1108 | |
Suzuki K Poulose | 9163ee23 | 2016-03-22 17:01:21 +0000 | [diff] [blame] | 1109 | /* Free the HW pgd, one page at a time */ |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1110 | if (pgd) { |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1111 | free_pages_exact(pgd, stage2_pgd_size(kvm)); |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1112 | free_percpu(mmu->last_vcpu_ran); |
| 1113 | } |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1114 | } |
| 1115 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1116 | static p4d_t *stage2_get_p4d(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache, |
Christoffer Dall | 38f791a | 2014-10-10 12:14:28 +0200 | [diff] [blame] | 1117 | phys_addr_t addr) |
| 1118 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1119 | struct kvm *kvm = mmu->kvm; |
Christoffer Dall | 38f791a | 2014-10-10 12:14:28 +0200 | [diff] [blame] | 1120 | pgd_t *pgd; |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1121 | p4d_t *p4d; |
Christoffer Dall | 38f791a | 2014-10-10 12:14:28 +0200 | [diff] [blame] | 1122 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1123 | pgd = mmu->pgd + stage2_pgd_index(kvm, addr); |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1124 | if (stage2_pgd_none(kvm, *pgd)) { |
Christoffer Dall | 38f791a | 2014-10-10 12:14:28 +0200 | [diff] [blame] | 1125 | if (!cache) |
| 1126 | return NULL; |
Sean Christopherson | c1a33ae | 2020-07-02 19:35:42 -0700 | [diff] [blame] | 1127 | p4d = kvm_mmu_memory_cache_alloc(cache); |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1128 | stage2_pgd_populate(kvm, pgd, p4d); |
Christoffer Dall | 38f791a | 2014-10-10 12:14:28 +0200 | [diff] [blame] | 1129 | get_page(virt_to_page(pgd)); |
| 1130 | } |
| 1131 | |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1132 | return stage2_p4d_offset(kvm, pgd, addr); |
| 1133 | } |
| 1134 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1135 | static pud_t *stage2_get_pud(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache, |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1136 | phys_addr_t addr) |
| 1137 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1138 | struct kvm *kvm = mmu->kvm; |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1139 | p4d_t *p4d; |
| 1140 | pud_t *pud; |
| 1141 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1142 | p4d = stage2_get_p4d(mmu, cache, addr); |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1143 | if (stage2_p4d_none(kvm, *p4d)) { |
| 1144 | if (!cache) |
| 1145 | return NULL; |
Sean Christopherson | c1a33ae | 2020-07-02 19:35:42 -0700 | [diff] [blame] | 1146 | pud = kvm_mmu_memory_cache_alloc(cache); |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1147 | stage2_p4d_populate(kvm, p4d, pud); |
| 1148 | get_page(virt_to_page(p4d)); |
| 1149 | } |
| 1150 | |
| 1151 | return stage2_pud_offset(kvm, p4d, addr); |
Christoffer Dall | 38f791a | 2014-10-10 12:14:28 +0200 | [diff] [blame] | 1152 | } |
| 1153 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1154 | static pmd_t *stage2_get_pmd(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache, |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1155 | phys_addr_t addr) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1156 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1157 | struct kvm *kvm = mmu->kvm; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1158 | pud_t *pud; |
| 1159 | pmd_t *pmd; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1160 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1161 | pud = stage2_get_pud(mmu, cache, addr); |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1162 | if (!pud || stage2_pud_huge(kvm, *pud)) |
Marc Zyngier | d6dbdd3 | 2017-06-05 19:17:18 +0100 | [diff] [blame] | 1163 | return NULL; |
| 1164 | |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1165 | if (stage2_pud_none(kvm, *pud)) { |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1166 | if (!cache) |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1167 | return NULL; |
Sean Christopherson | c1a33ae | 2020-07-02 19:35:42 -0700 | [diff] [blame] | 1168 | pmd = kvm_mmu_memory_cache_alloc(cache); |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1169 | stage2_pud_populate(kvm, pud, pmd); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1170 | get_page(virt_to_page(pud)); |
Marc Zyngier | c62ee2b | 2012-10-15 11:27:37 +0100 | [diff] [blame] | 1171 | } |
| 1172 | |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1173 | return stage2_pmd_offset(kvm, pud, addr); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1174 | } |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1175 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1176 | static int stage2_set_pmd_huge(struct kvm_s2_mmu *mmu, |
| 1177 | struct kvm_mmu_memory_cache *cache, |
| 1178 | phys_addr_t addr, const pmd_t *new_pmd) |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1179 | { |
| 1180 | pmd_t *pmd, old_pmd; |
| 1181 | |
Suzuki K Poulose | 3c3736c | 2019-03-20 14:57:19 +0000 | [diff] [blame] | 1182 | retry: |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1183 | pmd = stage2_get_pmd(mmu, cache, addr); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1184 | VM_BUG_ON(!pmd); |
| 1185 | |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1186 | old_pmd = *pmd; |
Suzuki K Poulose | 3c3736c | 2019-03-20 14:57:19 +0000 | [diff] [blame] | 1187 | /* |
| 1188 | 	 * Multiple vcpus faulting on the same PMD entry can |
| 1189 | * lead to them sequentially updating the PMD with the |
| 1190 | * same value. Following the break-before-make |
| 1191 | * (pmd_clear() followed by tlb_flush()) process can |
| 1192 | * hinder forward progress due to refaults generated |
| 1193 | * on missing translations. |
| 1194 | * |
| 1195 | * Skip updating the page table if the entry is |
| 1196 | * unchanged. |
| 1197 | */ |
| 1198 | if (pmd_val(old_pmd) == pmd_val(*new_pmd)) |
| 1199 | return 0; |
| 1200 | |
Marc Zyngier | d4b9e07 | 2016-04-28 16:16:31 +0100 | [diff] [blame] | 1201 | if (pmd_present(old_pmd)) { |
Punit Agrawal | 86658b8 | 2018-08-13 11:43:50 +0100 | [diff] [blame] | 1202 | /* |
Suzuki K Poulose | 3c3736c | 2019-03-20 14:57:19 +0000 | [diff] [blame] | 1203 | 		 * If we already have a PTE level mapping for this block, |
| 1204 | * we must unmap it to avoid inconsistent TLB state and |
| 1205 | * leaking the table page. We could end up in this situation |
| 1206 | * if the memory slot was marked for dirty logging and was |
| 1207 | * reverted, leaving PTE level mappings for the pages accessed |
| 1208 | * during the period. So, unmap the PTE level mapping for this |
| 1209 | * block and retry, as we could have released the upper level |
| 1210 | * table in the process. |
Punit Agrawal | 86658b8 | 2018-08-13 11:43:50 +0100 | [diff] [blame] | 1211 | * |
Suzuki K Poulose | 3c3736c | 2019-03-20 14:57:19 +0000 | [diff] [blame] | 1212 | 		 * Normal THP split/merge follows the mmu_notifier callbacks and is |
| 1213 | 		 * handled accordingly. |
Punit Agrawal | 86658b8 | 2018-08-13 11:43:50 +0100 | [diff] [blame] | 1214 | */ |
Suzuki K Poulose | 3c3736c | 2019-03-20 14:57:19 +0000 | [diff] [blame] | 1215 | if (!pmd_thp_or_huge(old_pmd)) { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1216 | unmap_stage2_range(mmu, addr & S2_PMD_MASK, S2_PMD_SIZE); |
Suzuki K Poulose | 3c3736c | 2019-03-20 14:57:19 +0000 | [diff] [blame] | 1217 | goto retry; |
| 1218 | } |
Punit Agrawal | 86658b8 | 2018-08-13 11:43:50 +0100 | [diff] [blame] | 1219 | /* |
| 1220 | * Mapping in huge pages should only happen through a |
| 1221 | * fault. If a page is merged into a transparent huge |
| 1222 | * page, the individual subpages of that huge page |
| 1223 | * should be unmapped through MMU notifiers before we |
| 1224 | * get here. |
| 1225 | * |
| 1226 | * Merging of CompoundPages is not supported; they |
| 1227 | 		 * should instead be split first, unmapped, merged, |
| 1228 | * and mapped back in on-demand. |
| 1229 | */ |
Suzuki K Poulose | 3c3736c | 2019-03-20 14:57:19 +0000 | [diff] [blame] | 1230 | WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); |
Marc Zyngier | d4b9e07 | 2016-04-28 16:16:31 +0100 | [diff] [blame] | 1231 | pmd_clear(pmd); |
Marc Zyngier | efaa5b9 | 2019-01-02 12:34:25 +0000 | [diff] [blame] | 1232 | kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL); |
Marc Zyngier | d4b9e07 | 2016-04-28 16:16:31 +0100 | [diff] [blame] | 1233 | } else { |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1234 | get_page(virt_to_page(pmd)); |
Marc Zyngier | d4b9e07 | 2016-04-28 16:16:31 +0100 | [diff] [blame] | 1235 | } |
| 1236 | |
| 1237 | kvm_set_pmd(pmd, *new_pmd); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1238 | return 0; |
| 1239 | } |
| 1240 | |
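/*
 * Editorial summary (not part of the original file) of the break-before-make
 * sequence performed above when a present huge PMD is replaced:
 *
 *	old_pmd = *pmd;
 *	pmd_clear(pmd);				// break: old entry invalid
 *	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
 *	kvm_set_pmd(pmd, *new_pmd);		// make: new block visible
 *
 * The early return when the old and new values already match skips this
 * sequence entirely, so several vCPUs faulting on the same block don't keep
 * clearing and refilling one another's entry.
 */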
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1241 | static int stage2_set_pud_huge(struct kvm_s2_mmu *mmu, |
| 1242 | struct kvm_mmu_memory_cache *cache, |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1243 | phys_addr_t addr, const pud_t *new_pudp) |
| 1244 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1245 | struct kvm *kvm = mmu->kvm; |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1246 | pud_t *pudp, old_pud; |
| 1247 | |
Suzuki K Poulose | 3c3736c | 2019-03-20 14:57:19 +0000 | [diff] [blame] | 1248 | retry: |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1249 | pudp = stage2_get_pud(mmu, cache, addr); |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1250 | VM_BUG_ON(!pudp); |
| 1251 | |
| 1252 | old_pud = *pudp; |
| 1253 | |
| 1254 | /* |
| 1255 | 	 * A large number of vcpus faulting on the same stage 2 entry |
Suzuki K Poulose | 3c3736c | 2019-03-20 14:57:19 +0000 | [diff] [blame] | 1256 | * can lead to a refault due to the stage2_pud_clear()/tlb_flush(). |
| 1257 | * Skip updating the page tables if there is no change. |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1258 | */ |
| 1259 | if (pud_val(old_pud) == pud_val(*new_pudp)) |
| 1260 | return 0; |
| 1261 | |
| 1262 | if (stage2_pud_present(kvm, old_pud)) { |
Suzuki K Poulose | 3c3736c | 2019-03-20 14:57:19 +0000 | [diff] [blame] | 1263 | /* |
| 1264 | 		 * If we already have a table level mapping for this block, unmap |
| 1265 | * the range for this block and retry. |
| 1266 | */ |
| 1267 | if (!stage2_pud_huge(kvm, old_pud)) { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1268 | unmap_stage2_range(mmu, addr & S2_PUD_MASK, S2_PUD_SIZE); |
Suzuki K Poulose | 3c3736c | 2019-03-20 14:57:19 +0000 | [diff] [blame] | 1269 | goto retry; |
| 1270 | } |
| 1271 | |
| 1272 | WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp)); |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1273 | stage2_pud_clear(kvm, pudp); |
Marc Zyngier | efaa5b9 | 2019-01-02 12:34:25 +0000 | [diff] [blame] | 1274 | kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL); |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1275 | } else { |
| 1276 | get_page(virt_to_page(pudp)); |
| 1277 | } |
| 1278 | |
| 1279 | kvm_set_pud(pudp, *new_pudp); |
| 1280 | return 0; |
| 1281 | } |
| 1282 | |
Punit Agrawal | 86d1c55 | 2018-12-11 17:10:38 +0000 | [diff] [blame] | 1283 | /* |
| 1284 | * stage2_get_leaf_entry - walk the stage2 VM page tables and return |
| 1285 | * true if a valid and present leaf-entry is found. A pointer to the |
| 1286 | * leaf-entry is returned in the appropriate level variable - pudpp, |
| 1287 | * pmdpp, ptepp. |
| 1288 | */ |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1289 | static bool stage2_get_leaf_entry(struct kvm_s2_mmu *mmu, phys_addr_t addr, |
Punit Agrawal | 86d1c55 | 2018-12-11 17:10:38 +0000 | [diff] [blame] | 1290 | pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp) |
Marc Zyngier | 7a3796d | 2017-10-23 17:11:21 +0100 | [diff] [blame] | 1291 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1292 | struct kvm *kvm = mmu->kvm; |
Punit Agrawal | 86d1c55 | 2018-12-11 17:10:38 +0000 | [diff] [blame] | 1293 | pud_t *pudp; |
Marc Zyngier | 7a3796d | 2017-10-23 17:11:21 +0100 | [diff] [blame] | 1294 | pmd_t *pmdp; |
| 1295 | pte_t *ptep; |
| 1296 | |
Punit Agrawal | 86d1c55 | 2018-12-11 17:10:38 +0000 | [diff] [blame] | 1297 | *pudpp = NULL; |
| 1298 | *pmdpp = NULL; |
| 1299 | *ptepp = NULL; |
| 1300 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1301 | pudp = stage2_get_pud(mmu, NULL, addr); |
Punit Agrawal | 86d1c55 | 2018-12-11 17:10:38 +0000 | [diff] [blame] | 1302 | if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp)) |
| 1303 | return false; |
| 1304 | |
| 1305 | if (stage2_pud_huge(kvm, *pudp)) { |
| 1306 | *pudpp = pudp; |
| 1307 | return true; |
| 1308 | } |
| 1309 | |
| 1310 | pmdp = stage2_pmd_offset(kvm, pudp, addr); |
Marc Zyngier | 7a3796d | 2017-10-23 17:11:21 +0100 | [diff] [blame] | 1311 | if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp)) |
| 1312 | return false; |
| 1313 | |
Punit Agrawal | 86d1c55 | 2018-12-11 17:10:38 +0000 | [diff] [blame] | 1314 | if (pmd_thp_or_huge(*pmdp)) { |
| 1315 | *pmdpp = pmdp; |
| 1316 | return true; |
| 1317 | } |
Marc Zyngier | 7a3796d | 2017-10-23 17:11:21 +0100 | [diff] [blame] | 1318 | |
| 1319 | ptep = pte_offset_kernel(pmdp, addr); |
| 1320 | if (!ptep || pte_none(*ptep) || !pte_present(*ptep)) |
| 1321 | return false; |
| 1322 | |
Punit Agrawal | 86d1c55 | 2018-12-11 17:10:38 +0000 | [diff] [blame] | 1323 | *ptepp = ptep; |
| 1324 | return true; |
| 1325 | } |
| 1326 | |
Paolo Bonzini | 0378dae | 2020-08-09 12:58:23 -0400 | [diff] [blame] | 1327 | static bool stage2_is_exec(struct kvm_s2_mmu *mmu, phys_addr_t addr, unsigned long sz) |
Punit Agrawal | 86d1c55 | 2018-12-11 17:10:38 +0000 | [diff] [blame] | 1328 | { |
| 1329 | pud_t *pudp; |
| 1330 | pmd_t *pmdp; |
| 1331 | pte_t *ptep; |
| 1332 | bool found; |
| 1333 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1334 | found = stage2_get_leaf_entry(mmu, addr, &pudp, &pmdp, &ptep); |
Punit Agrawal | 86d1c55 | 2018-12-11 17:10:38 +0000 | [diff] [blame] | 1335 | if (!found) |
| 1336 | return false; |
| 1337 | |
| 1338 | if (pudp) |
Will Deacon | b757b47 | 2020-07-23 11:17:14 +0100 | [diff] [blame] | 1339 | return sz <= PUD_SIZE && kvm_s2pud_exec(pudp); |
Punit Agrawal | 86d1c55 | 2018-12-11 17:10:38 +0000 | [diff] [blame] | 1340 | else if (pmdp) |
Will Deacon | b757b47 | 2020-07-23 11:17:14 +0100 | [diff] [blame] | 1341 | return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp); |
Punit Agrawal | 86d1c55 | 2018-12-11 17:10:38 +0000 | [diff] [blame] | 1342 | else |
Will Deacon | b757b47 | 2020-07-23 11:17:14 +0100 | [diff] [blame] | 1343 | return sz == PAGE_SIZE && kvm_s2pte_exec(ptep); |
Marc Zyngier | 7a3796d | 2017-10-23 17:11:21 +0100 | [diff] [blame] | 1344 | } |
| 1345 | |
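/*
 * Editorial note with a small example (not part of the original file): sz
 * is the size of the mapping about to be installed. Installing a PAGE_SIZE
 * pte underneath an already executable PMD block returns true here
 * (PAGE_SIZE <= PMD_SIZE and the block carries the exec attribute), whereas
 * installing a PMD block over a single executable pte returns false, since
 * only one page of the new block is known to be clean. The caller is
 * expected to use this to decide whether the exec attribute can be carried
 * over without a fresh icache invalidation.
 */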
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1346 | static int stage2_set_pte(struct kvm_s2_mmu *mmu, |
| 1347 | struct kvm_mmu_memory_cache *cache, |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1348 | phys_addr_t addr, const pte_t *new_pte, |
| 1349 | unsigned long flags) |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1350 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1351 | struct kvm *kvm = mmu->kvm; |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1352 | pud_t *pud; |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1353 | pmd_t *pmd; |
| 1354 | pte_t *pte, old_pte; |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1355 | bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP; |
| 1356 | bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE; |
| 1357 | |
| 1358 | VM_BUG_ON(logging_active && !cache); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1359 | |
Christoffer Dall | 38f791a | 2014-10-10 12:14:28 +0200 | [diff] [blame] | 1360 | /* Create stage-2 page table mapping - Levels 0 and 1 */ |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1361 | pud = stage2_get_pud(mmu, cache, addr); |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1362 | if (!pud) { |
| 1363 | /* |
| 1364 | * Ignore calls from kvm_set_spte_hva for unallocated |
| 1365 | * address ranges. |
| 1366 | */ |
| 1367 | return 0; |
| 1368 | } |
| 1369 | |
| 1370 | /* |
| 1371 | 	 * While dirty page logging is active, dissolve the huge PUD, then |
| 1372 | 	 * continue on to allocate a page. |
| 1373 | */ |
| 1374 | if (logging_active) |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1375 | stage2_dissolve_pud(mmu, addr, pud); |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1376 | |
| 1377 | if (stage2_pud_none(kvm, *pud)) { |
| 1378 | if (!cache) |
| 1379 | return 0; /* ignore calls from kvm_set_spte_hva */ |
Sean Christopherson | c1a33ae | 2020-07-02 19:35:42 -0700 | [diff] [blame] | 1380 | pmd = kvm_mmu_memory_cache_alloc(cache); |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1381 | stage2_pud_populate(kvm, pud, pmd); |
| 1382 | get_page(virt_to_page(pud)); |
| 1383 | } |
| 1384 | |
| 1385 | pmd = stage2_pmd_offset(kvm, pud, addr); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1386 | if (!pmd) { |
| 1387 | /* |
| 1388 | * Ignore calls from kvm_set_spte_hva for unallocated |
| 1389 | * address ranges. |
| 1390 | */ |
| 1391 | return 0; |
| 1392 | } |
| 1393 | |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1394 | /* |
| 1395 | 	 * While dirty page logging is active, dissolve the huge PMD, then |
| 1396 | 	 * continue on to allocate a page. |
| 1397 | */ |
| 1398 | if (logging_active) |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1399 | stage2_dissolve_pmd(mmu, addr, pmd); |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1400 | |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1401 | /* Create stage-2 page mappings - Level 2 */ |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1402 | if (pmd_none(*pmd)) { |
| 1403 | if (!cache) |
| 1404 | return 0; /* ignore calls from kvm_set_spte_hva */ |
Sean Christopherson | c1a33ae | 2020-07-02 19:35:42 -0700 | [diff] [blame] | 1405 | pte = kvm_mmu_memory_cache_alloc(cache); |
Marc Zyngier | 0db9dd8 | 2018-06-27 15:51:05 +0100 | [diff] [blame] | 1406 | kvm_pmd_populate(pmd, pte); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1407 | get_page(virt_to_page(pmd)); |
Marc Zyngier | c62ee2b | 2012-10-15 11:27:37 +0100 | [diff] [blame] | 1408 | } |
| 1409 | |
| 1410 | pte = pte_offset_kernel(pmd, addr); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1411 | |
| 1412 | if (iomap && pte_present(*pte)) |
| 1413 | return -EFAULT; |
| 1414 | |
| 1415 | /* Create 2nd stage page table mapping - Level 3 */ |
| 1416 | old_pte = *pte; |
Marc Zyngier | d4b9e07 | 2016-04-28 16:16:31 +0100 | [diff] [blame] | 1417 | if (pte_present(old_pte)) { |
Punit Agrawal | 976d34e | 2018-08-13 11:43:51 +0100 | [diff] [blame] | 1418 | /* Skip page table update if there is no change */ |
| 1419 | if (pte_val(old_pte) == pte_val(*new_pte)) |
| 1420 | return 0; |
| 1421 | |
Marc Zyngier | d4b9e07 | 2016-04-28 16:16:31 +0100 | [diff] [blame] | 1422 | kvm_set_pte(pte, __pte(0)); |
Marc Zyngier | efaa5b9 | 2019-01-02 12:34:25 +0000 | [diff] [blame] | 1423 | kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL); |
Marc Zyngier | d4b9e07 | 2016-04-28 16:16:31 +0100 | [diff] [blame] | 1424 | } else { |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1425 | get_page(virt_to_page(pte)); |
Marc Zyngier | d4b9e07 | 2016-04-28 16:16:31 +0100 | [diff] [blame] | 1426 | } |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1427 | |
Marc Zyngier | d4b9e07 | 2016-04-28 16:16:31 +0100 | [diff] [blame] | 1428 | kvm_set_pte(pte, *new_pte); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1429 | return 0; |
| 1430 | } |
| 1431 | |
Catalin Marinas | 0648505 | 2016-04-13 17:57:37 +0100 | [diff] [blame] | 1432 | #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
| 1433 | static int stage2_ptep_test_and_clear_young(pte_t *pte) |
| 1434 | { |
| 1435 | if (pte_young(*pte)) { |
| 1436 | *pte = pte_mkold(*pte); |
| 1437 | return 1; |
| 1438 | } |
| 1439 | return 0; |
| 1440 | } |
| 1441 | #else |
| 1442 | static int stage2_ptep_test_and_clear_young(pte_t *pte) |
| 1443 | { |
| 1444 | return __ptep_test_and_clear_young(pte); |
| 1445 | } |
| 1446 | #endif |
| 1447 | |
| 1448 | static int stage2_pmdp_test_and_clear_young(pmd_t *pmd) |
| 1449 | { |
| 1450 | return stage2_ptep_test_and_clear_young((pte_t *)pmd); |
| 1451 | } |
| 1452 | |
Punit Agrawal | 35a6396 | 2018-12-11 17:10:40 +0000 | [diff] [blame] | 1453 | static int stage2_pudp_test_and_clear_young(pud_t *pud) |
| 1454 | { |
| 1455 | return stage2_ptep_test_and_clear_young((pte_t *)pud); |
| 1456 | } |
| 1457 | |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1458 | /** |
| 1459 | * kvm_phys_addr_ioremap - map a device range to guest IPA |
| 1460 | * |
| 1461 | * @kvm: The KVM pointer |
| 1462 | * @guest_ipa: The IPA at which to insert the mapping |
| 1463 | * @pa: The physical address of the device |
| 1464 | * @size: The size of the mapping |
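 * @writable:	Whether or not to create a writable mapping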
| 1465 | */ |
| 1466 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, |
Ard Biesheuvel | c40f2f8 | 2014-09-17 14:56:18 -0700 | [diff] [blame] | 1467 | phys_addr_t pa, unsigned long size, bool writable) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1468 | { |
| 1469 | phys_addr_t addr, end; |
| 1470 | int ret = 0; |
| 1471 | unsigned long pfn; |
Sean Christopherson | c1a33ae | 2020-07-02 19:35:42 -0700 | [diff] [blame] | 1472 | struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, }; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1473 | |
| 1474 | end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; |
| 1475 | pfn = __phys_to_pfn(pa); |
| 1476 | |
| 1477 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { |
Punit Agrawal | f8df733 | 2018-12-11 17:10:36 +0000 | [diff] [blame] | 1478 | pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1479 | |
Ard Biesheuvel | c40f2f8 | 2014-09-17 14:56:18 -0700 | [diff] [blame] | 1480 | if (writable) |
Catalin Marinas | 0648505 | 2016-04-13 17:57:37 +0100 | [diff] [blame] | 1481 | pte = kvm_s2pte_mkwrite(pte); |
Ard Biesheuvel | c40f2f8 | 2014-09-17 14:56:18 -0700 | [diff] [blame] | 1482 | |
Sean Christopherson | c1a33ae | 2020-07-02 19:35:42 -0700 | [diff] [blame] | 1483 | ret = kvm_mmu_topup_memory_cache(&cache, |
| 1484 | kvm_mmu_cache_min_pages(kvm)); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1485 | if (ret) |
| 1486 | goto out; |
| 1487 | spin_lock(&kvm->mmu_lock); |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1488 | ret = stage2_set_pte(&kvm->arch.mmu, &cache, addr, &pte, |
| 1489 | KVM_S2PTE_FLAG_IS_IOMAP); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1490 | spin_unlock(&kvm->mmu_lock); |
| 1491 | if (ret) |
| 1492 | goto out; |
| 1493 | |
| 1494 | pfn++; |
| 1495 | } |
| 1496 | |
| 1497 | out: |
Sean Christopherson | c1a33ae | 2020-07-02 19:35:42 -0700 | [diff] [blame] | 1498 | kvm_mmu_free_memory_cache(&cache); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 1499 | return ret; |
| 1500 | } |
| 1501 | |
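/*
 * Hedged usage sketch (editorial note, not from the original file): the
 * in-kernel vgic is one user of this helper, mapping the GIC virtual CPU
 * interface into the guest at the IPA chosen by userspace. The constants
 * below are placeholders, not a real device layout.
 *
 *	// 8K of GICV at guest IPA 0x08040000, writable:
 *	ret = kvm_phys_addr_ioremap(kvm, 0x08040000, gicv_phys_base,
 *				    SZ_8K, true);
 *
 * Every page in the range becomes an individual PAGE_S2_DEVICE pte (no
 * huge mappings), and KVM_S2PTE_FLAG_IS_IOMAP makes stage2_set_pte()
 * return -EFAULT rather than silently overwrite an existing mapping.
 */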
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1502 | /** |
| 1503 | * stage2_wp_ptes - write protect PMD range |
| 1504 | * @pmd: pointer to pmd entry |
| 1505 | * @addr: range start address |
| 1506 | * @end: range end address |
| 1507 | */ |
| 1508 | static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) |
| 1509 | { |
| 1510 | pte_t *pte; |
| 1511 | |
| 1512 | pte = pte_offset_kernel(pmd, addr); |
| 1513 | do { |
| 1514 | if (!pte_none(*pte)) { |
| 1515 | if (!kvm_s2pte_readonly(pte)) |
| 1516 | kvm_set_s2pte_readonly(pte); |
| 1517 | } |
| 1518 | } while (pte++, addr += PAGE_SIZE, addr != end); |
| 1519 | } |
| 1520 | |
| 1521 | /** |
| 1522 | * stage2_wp_pmds - write protect PUD range |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1523 |  * @mmu:	stage2 mmu instance for the VM |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1524 | * @pud: pointer to pud entry |
| 1525 | * @addr: range start address |
| 1526 | * @end: range end address |
| 1527 | */ |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1528 | static void stage2_wp_pmds(struct kvm_s2_mmu *mmu, pud_t *pud, |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1529 | phys_addr_t addr, phys_addr_t end) |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1530 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1531 | struct kvm *kvm = mmu->kvm; |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1532 | pmd_t *pmd; |
| 1533 | phys_addr_t next; |
| 1534 | |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1535 | pmd = stage2_pmd_offset(kvm, pud, addr); |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1536 | |
| 1537 | do { |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1538 | next = stage2_pmd_addr_end(kvm, addr, end); |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1539 | if (!pmd_none(*pmd)) { |
Suzuki K Poulose | bbb3b6b | 2016-03-01 12:00:39 +0000 | [diff] [blame] | 1540 | if (pmd_thp_or_huge(*pmd)) { |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1541 | if (!kvm_s2pmd_readonly(pmd)) |
| 1542 | kvm_set_s2pmd_readonly(pmd); |
| 1543 | } else { |
| 1544 | stage2_wp_ptes(pmd, addr, next); |
| 1545 | } |
| 1546 | } |
| 1547 | } while (pmd++, addr = next, addr != end); |
| 1548 | } |
| 1549 | |
| 1550 | /** |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1551 | * stage2_wp_puds - write protect P4D range |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1552 | * @p4d: pointer to p4d entry |
Zenghui Yu | 8324c3d | 2019-03-25 08:02:05 +0000 | [diff] [blame] | 1553 | * @addr: range start address |
| 1554 | * @end: range end address |
| 1555 | */ |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1556 | static void stage2_wp_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d, |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1557 | phys_addr_t addr, phys_addr_t end) |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1558 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1559 | struct kvm *kvm = mmu->kvm; |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1560 | pud_t *pud; |
| 1561 | phys_addr_t next; |
| 1562 | |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1563 | pud = stage2_pud_offset(kvm, p4d, addr); |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1564 | do { |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1565 | next = stage2_pud_addr_end(kvm, addr, end); |
| 1566 | if (!stage2_pud_none(kvm, *pud)) { |
Punit Agrawal | 4ea5af5 | 2018-12-11 17:10:37 +0000 | [diff] [blame] | 1567 | if (stage2_pud_huge(kvm, *pud)) { |
| 1568 | if (!kvm_s2pud_readonly(pud)) |
| 1569 | kvm_set_s2pud_readonly(pud); |
| 1570 | } else { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1571 | stage2_wp_pmds(mmu, pud, addr, next); |
Punit Agrawal | 4ea5af5 | 2018-12-11 17:10:37 +0000 | [diff] [blame] | 1572 | } |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1573 | } |
| 1574 | } while (pud++, addr = next, addr != end); |
| 1575 | } |
| 1576 | |
| 1577 | /** |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1578 | * stage2_wp_p4ds - write protect PGD range |
| 1579 | * @pgd: pointer to pgd entry |
| 1580 | * @addr: range start address |
| 1581 | * @end: range end address |
| 1582 | */ |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1583 | static void stage2_wp_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd, |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1584 | phys_addr_t addr, phys_addr_t end) |
| 1585 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1586 | struct kvm *kvm = mmu->kvm; |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1587 | p4d_t *p4d; |
| 1588 | phys_addr_t next; |
| 1589 | |
| 1590 | p4d = stage2_p4d_offset(kvm, pgd, addr); |
| 1591 | do { |
| 1592 | next = stage2_p4d_addr_end(kvm, addr, end); |
| 1593 | if (!stage2_p4d_none(kvm, *p4d)) |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1594 | stage2_wp_puds(mmu, p4d, addr, next); |
Mike Rapoport | e9f6376 | 2020-06-04 16:46:23 -0700 | [diff] [blame] | 1595 | } while (p4d++, addr = next, addr != end); |
| 1596 | } |
| 1597 | |
| 1598 | /** |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1599 | * stage2_wp_range() - write protect stage2 memory region range |
| 1600 |  * @mmu:	The KVM stage2 MMU pointer |
| 1601 | * @addr: Start address of range |
| 1602 | * @end: End address of range |
| 1603 | */ |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1604 | static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end) |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1605 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1606 | struct kvm *kvm = mmu->kvm; |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1607 | pgd_t *pgd; |
| 1608 | phys_addr_t next; |
| 1609 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1610 | pgd = mmu->pgd + stage2_pgd_index(kvm, addr); |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1611 | do { |
| 1612 | /* |
| 1613 | * Release kvm_mmu_lock periodically if the memory region is |
| 1614 | * large. Otherwise, we may see kernel panics with |
Christoffer Dall | 227ea81 | 2015-01-23 10:49:31 +0100 | [diff] [blame] | 1615 | * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, |
| 1616 | * CONFIG_LOCKDEP. Additionally, holding the lock too long |
Suzuki K Poulose | 0c428a6a | 2017-05-16 10:34:55 +0100 | [diff] [blame] | 1617 | 		 * will also starve other vCPUs. We also have to make sure |
| 1618 | 		 * that the page tables are not freed while we have released |
| 1619 | * the lock. |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1620 | */ |
Suzuki K Poulose | 0c428a6a | 2017-05-16 10:34:55 +0100 | [diff] [blame] | 1621 | cond_resched_lock(&kvm->mmu_lock); |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1622 | if (!READ_ONCE(mmu->pgd)) |
Suzuki K Poulose | 0c428a6a | 2017-05-16 10:34:55 +0100 | [diff] [blame] | 1623 | break; |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 1624 | next = stage2_pgd_addr_end(kvm, addr, end); |
| 1625 | if (stage2_pgd_present(kvm, *pgd)) |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1626 | stage2_wp_p4ds(mmu, pgd, addr, next); |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1627 | } while (pgd++, addr = next, addr != end); |
| 1628 | } |
| 1629 | |
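/*
 * Editorial note (not part of the original file): cond_resched_lock() in
 * the walk above may drop kvm->mmu_lock, and kvm_free_stage2_pgd() can run
 * in that window and clear mmu->pgd under the same lock. Re-checking
 * mmu->pgd with READ_ONCE() after every potential resched is what keeps
 * the walk from touching freed page tables.
 */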
| 1630 | /** |
| 1631 | * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot |
| 1632 | * @kvm: The KVM pointer |
| 1633 | * @slot: The memory slot to write protect |
| 1634 | * |
| 1635 |  * Called to start logging dirty pages after the memory region's |
| 1636 |  * KVM_MEM_LOG_DIRTY_PAGES flag is set. After this function returns |
Punit Agrawal | 4ea5af5 | 2018-12-11 17:10:37 +0000 | [diff] [blame] | 1637 | * all present PUD, PMD and PTEs are write protected in the memory region. |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1638 | * Afterwards read of dirty page log can be called. |
| 1639 | * |
| 1640 | * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired, |
| 1641 | * serializing operations for VM memory regions. |
| 1642 | */ |
| 1643 | void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) |
| 1644 | { |
Paolo Bonzini | 9f6b802 | 2015-05-17 16:20:07 +0200 | [diff] [blame] | 1645 | struct kvm_memslots *slots = kvm_memslots(kvm); |
| 1646 | struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); |
Sean Christopherson | 0577d1a | 2020-02-18 13:07:31 -0800 | [diff] [blame] | 1647 | phys_addr_t start, end; |
| 1648 | |
| 1649 | if (WARN_ON_ONCE(!memslot)) |
| 1650 | return; |
| 1651 | |
| 1652 | start = memslot->base_gfn << PAGE_SHIFT; |
| 1653 | end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1654 | |
| 1655 | spin_lock(&kvm->mmu_lock); |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1656 | stage2_wp_range(&kvm->arch.mmu, start, end); |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1657 | spin_unlock(&kvm->mmu_lock); |
| 1658 | kvm_flush_remote_tlbs(kvm); |
| 1659 | } |
Mario Smarduch | 53c810c | 2015-01-15 15:58:57 -0800 | [diff] [blame] | 1660 | |
| 1661 | /** |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1662 | * kvm_mmu_write_protect_pt_masked() - write protect dirty pages |
Mario Smarduch | 53c810c | 2015-01-15 15:58:57 -0800 | [diff] [blame] | 1663 | * @kvm: The KVM pointer |
| 1664 | * @slot: The memory slot associated with mask |
| 1665 | * @gfn_offset: The gfn offset in memory slot |
| 1666 | * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory |
| 1667 | * slot to be write protected |
| 1668 | * |
| 1669 |  * Walks the bits set in mask and write protects the associated ptes. Caller must |
| 1670 | * acquire kvm_mmu_lock. |
| 1671 | */ |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1672 | static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, |
Mario Smarduch | 53c810c | 2015-01-15 15:58:57 -0800 | [diff] [blame] | 1673 | struct kvm_memory_slot *slot, |
| 1674 | gfn_t gfn_offset, unsigned long mask) |
| 1675 | { |
| 1676 | phys_addr_t base_gfn = slot->base_gfn + gfn_offset; |
| 1677 | phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; |
| 1678 | phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; |
| 1679 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1680 | stage2_wp_range(&kvm->arch.mmu, start, end); |
Mario Smarduch | 53c810c | 2015-01-15 15:58:57 -0800 | [diff] [blame] | 1681 | } |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 1682 | |
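/*
 * Worked example (editorial note, not from the original file): with
 * slot->base_gfn = 0x1000, gfn_offset = 0 and mask = 0x6 (bits 1 and 2
 * set), __ffs(mask) = 1 and __fls(mask) = 2, so
 *
 *	start = 0x1001 << PAGE_SHIFT
 *	end   = 0x1003 << PAGE_SHIFT
 *
 * i.e. exactly gfns 0x1001 and 0x1002 get write protected. A sparse mask
 * such as 0x5 also sweeps the clear bit in the middle, which is harmless:
 * write protecting an already read-only entry changes nothing.
 */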
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1683 | /* |
| 1684 | * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected |
| 1685 | * dirty pages. |
| 1686 | * |
| 1687 | * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to |
| 1688 | * enable dirty logging for them. |
| 1689 | */ |
| 1690 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, |
| 1691 | struct kvm_memory_slot *slot, |
| 1692 | gfn_t gfn_offset, unsigned long mask) |
| 1693 | { |
| 1694 | kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); |
| 1695 | } |
| 1696 | |
Marc Zyngier | 17ab9d5 | 2017-10-23 17:11:22 +0100 | [diff] [blame] | 1697 | static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) |
Marc Zyngier | 0d3e4d4 | 2015-01-05 21:13:24 +0000 | [diff] [blame] | 1698 | { |
Marc Zyngier | 17ab9d5 | 2017-10-23 17:11:22 +0100 | [diff] [blame] | 1699 | __clean_dcache_guest_page(pfn, size); |
Marc Zyngier | a15f693 | 2017-10-23 17:11:15 +0100 | [diff] [blame] | 1700 | } |
| 1701 | |
Marc Zyngier | 17ab9d5 | 2017-10-23 17:11:22 +0100 | [diff] [blame] | 1702 | static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size) |
Marc Zyngier | a15f693 | 2017-10-23 17:11:15 +0100 | [diff] [blame] | 1703 | { |
Marc Zyngier | 17ab9d5 | 2017-10-23 17:11:22 +0100 | [diff] [blame] | 1704 | __invalidate_icache_guest_page(pfn, size); |
Marc Zyngier | 0d3e4d4 | 2015-01-05 21:13:24 +0000 | [diff] [blame] | 1705 | } |
| 1706 | |
James Morse | 1559b75 | 2019-12-17 12:38:09 +0000 | [diff] [blame] | 1707 | static void kvm_send_hwpoison_signal(unsigned long address, short lsb) |
James Morse | 196f878 | 2017-06-20 17:11:48 +0100 | [diff] [blame] | 1708 | { |
Eric W. Biederman | 795a837 | 2018-04-16 13:39:10 -0500 | [diff] [blame] | 1709 | send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current); |
James Morse | 196f878 | 2017-06-20 17:11:48 +0100 | [diff] [blame] | 1710 | } |
| 1711 | |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1712 | static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, |
| 1713 | unsigned long hva, |
| 1714 | unsigned long map_size) |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1715 | { |
Shaokun Zhang | c2be79a | 2019-02-19 17:22:21 +0800 | [diff] [blame] | 1716 | gpa_t gpa_start; |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1717 | hva_t uaddr_start, uaddr_end; |
| 1718 | size_t size; |
| 1719 | |
Suzuki K Poulose | 9f28361 | 2020-05-07 20:35:45 +0800 | [diff] [blame] | 1720 | /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */ |
| 1721 | if (map_size == PAGE_SIZE) |
| 1722 | return true; |
| 1723 | |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1724 | size = memslot->npages * PAGE_SIZE; |
| 1725 | |
| 1726 | gpa_start = memslot->base_gfn << PAGE_SHIFT; |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1727 | |
| 1728 | uaddr_start = memslot->userspace_addr; |
| 1729 | uaddr_end = uaddr_start + size; |
| 1730 | |
| 1731 | /* |
| 1732 | * Pages belonging to memslots that don't have the same alignment |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1733 | * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2 |
| 1734 | * PMD/PUD entries, because we'll end up mapping the wrong pages. |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1735 | * |
| 1736 | * Consider a layout like the following: |
| 1737 | * |
| 1738 | * memslot->userspace_addr: |
| 1739 | * +-----+--------------------+--------------------+---+ |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1740 | * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1741 | * +-----+--------------------+--------------------+---+ |
| 1742 | * |
Suzuki K Poulose | 9f28361 | 2020-05-07 20:35:45 +0800 | [diff] [blame] | 1743 | * memslot->base_gfn << PAGE_SHIFT: |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1744 | * +---+--------------------+--------------------+-----+ |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1745 | * |abc|def Stage-2 block | Stage-2 block |tvxyz| |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1746 | * +---+--------------------+--------------------+-----+ |
| 1747 | * |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1748 | * If we create those stage-2 blocks, we'll end up with this incorrect |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1749 | * mapping: |
| 1750 | * d -> f |
| 1751 | * e -> g |
| 1752 | * f -> h |
| 1753 | */ |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1754 | if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1755 | return false; |
| 1756 | |
| 1757 | /* |
| 1758 | * Next, let's make sure we're not trying to map anything not covered |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1759 | * by the memslot. This means we have to prohibit block size mappings |
| 1760 | * for the beginning and end of a non-block aligned and non-block sized |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1761 | * memory slot (illustrated by the head and tail parts of the |
| 1762 | * userspace view above containing pages 'abcde' and 'xyz', |
| 1763 | * respectively). |
| 1764 | * |
| 1765 | * Note that it doesn't matter if we do the check using the |
| 1766 | * userspace_addr or the base_gfn, as both are equally aligned (per |
| 1767 | * the check above) and equally sized. |
| 1768 | */ |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1769 | return (hva & ~(map_size - 1)) >= uaddr_start && |
| 1770 | (hva & ~(map_size - 1)) + map_size <= uaddr_end; |
Christoffer Dall | 6794ad5 | 2018-11-02 08:53:22 +0100 | [diff] [blame] | 1771 | } |
| 1772 | |
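/*
 * Worked example (editorial note, not from the original file), with a 2MB
 * map_size and made-up addresses: userspace_addr = 0x40200000 and
 * base_gfn << PAGE_SHIFT = 0x80000000 both sit at offset 0 within a 2MB
 * block, so the alignment check passes, and a fault at hva = 0x403ff000
 * may use a block mapping as long as the whole [0x40200000, 0x40400000)
 * block lies inside the memslot. Had userspace_addr been 0x40210000, the
 * offsets (0x10000 vs 0) would differ and every fault in this slot would
 * fall back to PAGE_SIZE mappings.
 */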
Suzuki K Poulose | 0529c90 | 2020-05-07 20:35:46 +0800 | [diff] [blame] | 1773 | /* |
| 1774 | * Check if the given hva is backed by a transparent huge page (THP) and |
| 1775 | * whether it can be mapped using block mapping in stage2. If so, adjust |
| 1776 | * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently |
| 1777 | * supported. This will need to be updated to support other THP sizes. |
| 1778 | * |
| 1779 | * Returns the size of the mapping. |
| 1780 | */ |
| 1781 | static unsigned long |
| 1782 | transparent_hugepage_adjust(struct kvm_memory_slot *memslot, |
| 1783 | unsigned long hva, kvm_pfn_t *pfnp, |
| 1784 | phys_addr_t *ipap) |
| 1785 | { |
| 1786 | kvm_pfn_t pfn = *pfnp; |
| 1787 | |
| 1788 | /* |
| 1789 | * Make sure the adjustment is done only for THP pages. Also make |
| 1790 | * sure that the HVA and IPA are sufficiently aligned and that the |
| 1791 | * block map is contained within the memslot. |
| 1792 | */ |
| 1793 | if (kvm_is_transparent_hugepage(pfn) && |
| 1794 | fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) { |
| 1795 | /* |
| 1796 | * The address we faulted on is backed by a transparent huge |
| 1797 | * page. However, because we map the compound huge page and |
| 1798 | * not the individual tail page, we need to transfer the |
| 1799 | * refcount to the head page. We have to be careful that the |
| 1800 | * THP doesn't start to split while we are adjusting the |
| 1801 | * refcounts. |
| 1802 | * |
| 1803 | * We are sure this doesn't happen, because mmu_notifier_retry |
| 1804 | * was successful and we are holding the mmu_lock, so if this |
| 1805 | * THP is trying to split, it will be blocked in the mmu |
| 1806 | * notifier before touching any of the pages, specifically |
| 1807 | * before being able to call __split_huge_page_refcount(). |
| 1808 | * |
| 1809 | * We can therefore safely transfer the refcount from PG_tail |
| 1810 | * to PG_head and switch the pfn from a tail page to the head |
| 1811 | * page accordingly. |
| 1812 | */ |
| 1813 | *ipap &= PMD_MASK; |
| 1814 | kvm_release_pfn_clean(pfn); |
| 1815 | pfn &= ~(PTRS_PER_PMD - 1); |
| 1816 | kvm_get_pfn(pfn); |
| 1817 | *pfnp = pfn; |
| 1818 | |
| 1819 | return PMD_SIZE; |
| 1820 | } |
| 1821 | |
| 1822 | /* Use page mapping if we cannot use block mapping. */ |
| 1823 | return PAGE_SIZE; |
| 1824 | } |
| 1825 | |
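/*
 * Worked example (editorial note, not from the original file): a fault at
 * IPA 0x40203000 whose hva resolves to pfn 0x89a03, a tail page of a 2MB
 * THP, is adjusted (4K pages, PTRS_PER_PMD = 512) to
 *
 *	*ipap = 0x40203000 & PMD_MASK          = 0x40200000
 *	pfn   = 0x89a03 & ~(PTRS_PER_PMD - 1)  = 0x89a00
 *
 * so the whole compound page is mapped with one block entry, and the
 * reference gup took on the tail page is moved to the head page.
 */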
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1826 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
Christoffer Dall | 9804788 | 2014-08-19 12:18:04 +0200 | [diff] [blame] | 1827 | struct kvm_memory_slot *memslot, unsigned long hva, |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1828 | unsigned long fault_status) |
| 1829 | { |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1830 | int ret; |
Punit Agrawal | 6396b85 | 2018-12-11 17:10:35 +0000 | [diff] [blame] | 1831 | bool write_fault, writable, force_pte = false; |
| 1832 | bool exec_fault, needs_exec; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1833 | unsigned long mmu_seq; |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1834 | gfn_t gfn = fault_ipa >> PAGE_SHIFT; |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1835 | struct kvm *kvm = vcpu->kvm; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1836 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1837 | struct vm_area_struct *vma; |
James Morse | 1559b75 | 2019-12-17 12:38:09 +0000 | [diff] [blame] | 1838 | short vma_shift; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 1839 | kvm_pfn_t pfn; |
Kim Phillips | b886576 | 2014-06-26 01:45:51 +0100 | [diff] [blame] | 1840 | pgprot_t mem_type = PAGE_S2; |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1841 | bool logging_active = memslot_is_logging(memslot); |
Punit Agrawal | 3f58bf6 | 2018-12-11 17:10:34 +0000 | [diff] [blame] | 1842 | unsigned long vma_pagesize, flags = 0; |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1843 | struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1844 | |
Ard Biesheuvel | a7d079c | 2014-09-09 11:27:09 +0100 | [diff] [blame] | 1845 | write_fault = kvm_is_write_fault(vcpu); |
Marc Zyngier | d0e22b4 | 2017-10-23 17:11:19 +0100 | [diff] [blame] | 1846 | exec_fault = kvm_vcpu_trap_is_iabt(vcpu); |
| 1847 | VM_BUG_ON(write_fault && exec_fault); |
| 1848 | |
| 1849 | if (fault_status == FSC_PERM && !write_fault && !exec_fault) { |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1850 | kvm_err("Unexpected L2 read permission error\n"); |
| 1851 | return -EFAULT; |
| 1852 | } |
| 1853 | |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1854 | /* Let's check if we will get back a huge page backed by hugetlbfs */ |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 1855 | mmap_read_lock(current->mm); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1856 | vma = find_vma_intersection(current->mm, hva, hva + 1); |
Ard Biesheuvel | 37b5440 | 2014-09-17 14:56:17 -0700 | [diff] [blame] | 1857 | if (unlikely(!vma)) { |
| 1858 | kvm_err("Failed to find VMA for hva 0x%lx\n", hva); |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 1859 | mmap_read_unlock(current->mm); |
Ard Biesheuvel | 37b5440 | 2014-09-17 14:56:17 -0700 | [diff] [blame] | 1860 | return -EFAULT; |
| 1861 | } |
| 1862 | |
James Morse | 1559b75 | 2019-12-17 12:38:09 +0000 | [diff] [blame] | 1863 | if (is_vm_hugetlb_page(vma)) |
| 1864 | vma_shift = huge_page_shift(hstate_vma(vma)); |
| 1865 | else |
| 1866 | vma_shift = PAGE_SHIFT; |
| 1867 | |
| 1868 | vma_pagesize = 1ULL << vma_shift; |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1869 | if (logging_active || |
Marc Zyngier | 6d674e2 | 2019-12-11 16:56:48 +0000 | [diff] [blame] | 1870 | (vma->vm_flags & VM_PFNMAP) || |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1871 | !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) { |
| 1872 | force_pte = true; |
| 1873 | vma_pagesize = PAGE_SIZE; |
| 1874 | } |
| 1875 | |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1876 | /* |
Suzuki K Poulose | 280cebf | 2019-01-29 19:12:17 +0000 | [diff] [blame] | 1877 | * The stage2 page tables have a minimum of 2 levels (for arm64 see
| 1878 | * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
| 1879 | * use PMD_SIZE huge mappings (even when the PMD is folded into the PGD).
| 1880 | * For PUD huge mappings, we must make sure that we have at least
| 1881 | * 3 levels, i.e., that the PMD is not folded.
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1882 | */ |
Suzuki K Poulose | a80868f | 2019-03-12 09:52:51 +0000 | [diff] [blame] | 1883 | if (vma_pagesize == PMD_SIZE || |
| 1884 | (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1885 | gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 1886 | mmap_read_unlock(current->mm); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1887 | |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1888 | /* We need at least the second and third level page table pages */
Sean Christopherson | c1a33ae | 2020-07-02 19:35:42 -0700 | [diff] [blame] | 1889 | ret = kvm_mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm)); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1890 | if (ret) |
| 1891 | return ret; |
| 1892 | |
| 1893 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
| 1894 | /* |
| 1895 | * Ensure the read of mmu_notifier_seq happens before we call |
| 1896 | * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk |
| 1897 | * the page we just got a reference to getting unmapped before we have a
| 1898 | * chance to grab the mmu_lock, which ensures that if the page gets
| 1899 | * unmapped afterwards, the call to kvm_unmap_hva will take it away |
| 1900 | * from us again properly. This smp_rmb() interacts with the smp_wmb() |
| 1901 | * in kvm_mmu_notifier_invalidate_<page|range_end>. |
| 1902 | */ |
| 1903 | smp_rmb(); |
| 1904 | |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1905 | pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); |
James Morse | 196f878 | 2017-06-20 17:11:48 +0100 | [diff] [blame] | 1906 | if (pfn == KVM_PFN_ERR_HWPOISON) { |
James Morse | 1559b75 | 2019-12-17 12:38:09 +0000 | [diff] [blame] | 1907 | kvm_send_hwpoison_signal(hva, vma_shift); |
James Morse | 196f878 | 2017-06-20 17:11:48 +0100 | [diff] [blame] | 1908 | return 0; |
| 1909 | } |
Christoffer Dall | 9ac7159 | 2016-08-17 10:46:10 +0200 | [diff] [blame] | 1910 | if (is_error_noslot_pfn(pfn)) |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1911 | return -EFAULT; |
| 1912 | |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1913 | if (kvm_is_device_pfn(pfn)) { |
Kim Phillips | b886576 | 2014-06-26 01:45:51 +0100 | [diff] [blame] | 1914 | mem_type = PAGE_S2_DEVICE; |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1915 | flags |= KVM_S2PTE_FLAG_IS_IOMAP; |
| 1916 | } else if (logging_active) { |
| 1917 | /* |
| 1918 | * Pages in a memslot with logging enabled
| 1919 | * should not be mapped with huge pages (it introduces churn
| 1920 | * and performance degradation), so force a pte mapping.
| 1921 | */ |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1922 | flags |= KVM_S2_FLAG_LOGGING_ACTIVE; |
| 1923 | |
| 1924 | /* |
| 1925 | * Only actually map the page as writable if this was a write |
| 1926 | * fault. |
| 1927 | */ |
| 1928 | if (!write_fault) |
| 1929 | writable = false; |
| 1930 | } |
Kim Phillips | b886576 | 2014-06-26 01:45:51 +0100 | [diff] [blame] | 1931 | |
Marc Zyngier | 6d674e2 | 2019-12-11 16:56:48 +0000 | [diff] [blame] | 1932 | if (exec_fault && is_iomap(flags)) |
| 1933 | return -ENOEXEC; |
| 1934 | |
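/*
 * With mmu_lock held, check that no MMU notifier invalidation has run since
 * mmu_notifier_seq was sampled above; if one has, drop the lock and return
 * so that the guest refaults rather than having a stale mapping installed.
 */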
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1935 | spin_lock(&kvm->mmu_lock); |
| 1936 | if (mmu_notifier_retry(kvm, mmu_seq)) |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 1937 | goto out_unlock; |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1938 | |
Suzuki K Poulose | 0529c90 | 2020-05-07 20:35:46 +0800 | [diff] [blame] | 1939 | /* |
| 1940 | * If we are not forced to use page mapping, check if we are |
| 1941 | * backed by a THP and thus use block mapping if possible. |
| 1942 | */ |
| 1943 | if (vma_pagesize == PAGE_SIZE && !force_pte) |
| 1944 | vma_pagesize = transparent_hugepage_adjust(memslot, hva, |
| 1945 | &pfn, &fault_ipa); |
Punit Agrawal | 3f58bf6 | 2018-12-11 17:10:34 +0000 | [diff] [blame] | 1946 | if (writable) |
| 1947 | kvm_set_pfn_dirty(pfn); |
| 1948 | |
Marc Zyngier | 6d674e2 | 2019-12-11 16:56:48 +0000 | [diff] [blame] | 1949 | if (fault_status != FSC_PERM && !is_iomap(flags)) |
Punit Agrawal | 3f58bf6 | 2018-12-11 17:10:34 +0000 | [diff] [blame] | 1950 | clean_dcache_guest_page(pfn, vma_pagesize); |
| 1951 | |
| 1952 | if (exec_fault) |
| 1953 | invalidate_icache_guest_page(pfn, vma_pagesize); |
| 1954 | |
Punit Agrawal | 6396b85 | 2018-12-11 17:10:35 +0000 | [diff] [blame] | 1955 | /* |
| 1956 | * If we took an execution fault we have made the |
| 1957 | * icache/dcache coherent above and should now let the s2 |
| 1958 | * mapping be executable. |
| 1959 | * |
| 1960 | * Write faults (!exec_fault && FSC_PERM) are orthogonal to |
| 1961 | * execute permissions, and we preserve whatever we have. |
| 1962 | */ |
| 1963 | needs_exec = exec_fault || |
Will Deacon | b757b47 | 2020-07-23 11:17:14 +0100 | [diff] [blame] | 1964 | (fault_status == FSC_PERM && |
Paolo Bonzini | 0378dae | 2020-08-09 12:58:23 -0400 | [diff] [blame] | 1965 | stage2_is_exec(mmu, fault_ipa, vma_pagesize)); |
Punit Agrawal | 6396b85 | 2018-12-11 17:10:35 +0000 | [diff] [blame] | 1966 | |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1967 | if (vma_pagesize == PUD_SIZE) { |
| 1968 | pud_t new_pud = kvm_pfn_pud(pfn, mem_type); |
| 1969 | |
| 1970 | new_pud = kvm_pud_mkhuge(new_pud); |
| 1971 | if (writable) |
| 1972 | new_pud = kvm_s2pud_mkwrite(new_pud); |
| 1973 | |
| 1974 | if (needs_exec) |
| 1975 | new_pud = kvm_s2pud_mkexec(new_pud); |
| 1976 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1977 | ret = stage2_set_pud_huge(mmu, memcache, fault_ipa, &new_pud); |
Punit Agrawal | b8e0ba7 | 2018-12-11 17:10:41 +0000 | [diff] [blame] | 1978 | } else if (vma_pagesize == PMD_SIZE) { |
Punit Agrawal | f8df733 | 2018-12-11 17:10:36 +0000 | [diff] [blame] | 1979 | pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type); |
| 1980 | |
| 1981 | new_pmd = kvm_pmd_mkhuge(new_pmd); |
| 1982 | |
Punit Agrawal | 3f58bf6 | 2018-12-11 17:10:34 +0000 | [diff] [blame] | 1983 | if (writable) |
Catalin Marinas | 0648505 | 2016-04-13 17:57:37 +0100 | [diff] [blame] | 1984 | new_pmd = kvm_s2pmd_mkwrite(new_pmd); |
Marc Zyngier | d0e22b4 | 2017-10-23 17:11:19 +0100 | [diff] [blame] | 1985 | |
Punit Agrawal | 6396b85 | 2018-12-11 17:10:35 +0000 | [diff] [blame] | 1986 | if (needs_exec) |
Marc Zyngier | d0e22b4 | 2017-10-23 17:11:19 +0100 | [diff] [blame] | 1987 | new_pmd = kvm_s2pmd_mkexec(new_pmd); |
Marc Zyngier | a15f693 | 2017-10-23 17:11:15 +0100 | [diff] [blame] | 1988 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 1989 | ret = stage2_set_pmd_huge(mmu, memcache, fault_ipa, &new_pmd); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1990 | } else { |
Punit Agrawal | f8df733 | 2018-12-11 17:10:36 +0000 | [diff] [blame] | 1991 | pte_t new_pte = kvm_pfn_pte(pfn, mem_type); |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1992 | |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1993 | if (writable) { |
Catalin Marinas | 0648505 | 2016-04-13 17:57:37 +0100 | [diff] [blame] | 1994 | new_pte = kvm_s2pte_mkwrite(new_pte); |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 1995 | mark_page_dirty(kvm, gfn); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 1996 | } |
Marc Zyngier | a9c0e12 | 2017-10-23 17:11:20 +0100 | [diff] [blame] | 1997 | |
Punit Agrawal | 6396b85 | 2018-12-11 17:10:35 +0000 | [diff] [blame] | 1998 | if (needs_exec) |
Marc Zyngier | d0e22b4 | 2017-10-23 17:11:19 +0100 | [diff] [blame] | 1999 | new_pte = kvm_s2pte_mkexec(new_pte); |
Marc Zyngier | a15f693 | 2017-10-23 17:11:15 +0100 | [diff] [blame] | 2000 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2001 | ret = stage2_set_pte(mmu, memcache, fault_ipa, &new_pte, flags); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2002 | } |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 2003 | |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2004 | out_unlock: |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 2005 | spin_unlock(&kvm->mmu_lock); |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2006 | kvm_set_pfn_accessed(pfn); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2007 | kvm_release_pfn_clean(pfn); |
Christoffer Dall | ad361f0 | 2012-11-01 17:14:45 +0100 | [diff] [blame] | 2008 | return ret; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2009 | } |
| 2010 | |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 2011 | /* |
| 2012 | * Resolve the access fault by making the page young again. |
| 2013 | * Note that because the faulting entry is guaranteed not to be |
| 2014 | * cached in the TLB, we don't need to invalidate anything. |
Catalin Marinas | 0648505 | 2016-04-13 17:57:37 +0100 | [diff] [blame] | 2015 | * Only the HW Access Flag updates are supported for Stage 2 (no DBM), |
| 2016 | * so there is no need for atomic (pte|pmd)_mkyoung operations. |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 2017 | */ |
| 2018 | static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) |
| 2019 | { |
Punit Agrawal | eb3f0624 | 2018-12-11 17:10:39 +0000 | [diff] [blame] | 2020 | pud_t *pud; |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 2021 | pmd_t *pmd; |
| 2022 | pte_t *pte; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2023 | kvm_pfn_t pfn; |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 2024 | bool pfn_valid = false; |
| 2025 | |
| 2026 | trace_kvm_access_fault(fault_ipa); |
| 2027 | |
| 2028 | spin_lock(&vcpu->kvm->mmu_lock); |
| 2029 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2030 | if (!stage2_get_leaf_entry(vcpu->arch.hw_mmu, fault_ipa, &pud, &pmd, &pte)) |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 2031 | goto out; |
| 2032 | |
Punit Agrawal | eb3f0624 | 2018-12-11 17:10:39 +0000 | [diff] [blame] | 2033 | if (pud) { /* HugeTLB */ |
| 2034 | *pud = kvm_s2pud_mkyoung(*pud); |
| 2035 | pfn = kvm_pud_pfn(*pud); |
| 2036 | pfn_valid = true; |
| 2037 | } else if (pmd) { /* THP, HugeTLB */ |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 2038 | *pmd = pmd_mkyoung(*pmd); |
| 2039 | pfn = pmd_pfn(*pmd); |
| 2040 | pfn_valid = true; |
Punit Agrawal | eb3f0624 | 2018-12-11 17:10:39 +0000 | [diff] [blame] | 2041 | } else { |
| 2042 | *pte = pte_mkyoung(*pte); /* Just a page... */ |
| 2043 | pfn = pte_pfn(*pte); |
| 2044 | pfn_valid = true; |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 2045 | } |
| 2046 | |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 2047 | out: |
| 2048 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2049 | if (pfn_valid) |
| 2050 | kvm_set_pfn_accessed(pfn); |
| 2051 | } |
| 2052 | |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2053 | /** |
| 2054 | * kvm_handle_guest_abort - handles all 2nd stage aborts |
| 2055 | * @vcpu: the VCPU pointer |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2056 | * |
| 2057 | * Any abort that gets to the host is almost guaranteed to be caused by a |
| 2058 | * missing second stage translation table entry, which can mean that either the
| 2059 | * guest simply needs more memory and we must allocate an appropriate page, or
| 2060 | * that the guest tried to access I/O memory, which is emulated by user
| 2061 | * space. The distinction is based on the IPA causing the fault and whether this |
| 2062 | * memory region has been registered as standard RAM by user space. |
| 2063 | */ |
Tianjia Zhang | 74cc7e0 | 2020-06-23 21:14:15 +0800 | [diff] [blame] | 2064 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2065 | { |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2066 | unsigned long fault_status; |
| 2067 | phys_addr_t fault_ipa; |
| 2068 | struct kvm_memory_slot *memslot; |
Christoffer Dall | 9804788 | 2014-08-19 12:18:04 +0200 | [diff] [blame] | 2069 | unsigned long hva; |
| 2070 | bool is_iabt, write_fault, writable; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2071 | gfn_t gfn; |
| 2072 | int ret, idx; |
| 2073 | |
Tyler Baicar | 621f48e | 2017-06-21 12:17:14 -0600 | [diff] [blame] | 2074 | fault_status = kvm_vcpu_trap_get_fault_type(vcpu); |
| 2075 | |
| 2076 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); |
James Morse | bb42892 | 2017-07-18 13:37:41 +0100 | [diff] [blame] | 2077 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
Tyler Baicar | 621f48e | 2017-06-21 12:17:14 -0600 | [diff] [blame] | 2078 | |
James Morse | bb42892 | 2017-07-18 13:37:41 +0100 | [diff] [blame] | 2079 | /* Synchronous External Abort? */ |
Will Deacon | c9a636f | 2020-07-29 11:28:18 +0100 | [diff] [blame] | 2080 | if (kvm_vcpu_abt_issea(vcpu)) { |
James Morse | bb42892 | 2017-07-18 13:37:41 +0100 | [diff] [blame] | 2081 | /* |
| 2082 | * For RAS the host kernel may handle this abort. |
| 2083 | * There is no need to pass the error into the guest. |
| 2084 | */ |
Will Deacon | 84b951a | 2020-07-29 11:28:19 +0100 | [diff] [blame] | 2085 | if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu))) |
James Morse | bb42892 | 2017-07-18 13:37:41 +0100 | [diff] [blame] | 2086 | kvm_inject_vabt(vcpu); |
Will Deacon | 84b951a | 2020-07-29 11:28:19 +0100 | [diff] [blame] | 2087 | |
| 2088 | return 1; |
Marc Zyngier | 4055710 | 2016-09-06 14:02:15 +0100 | [diff] [blame] | 2089 | } |
| 2090 | |
Gavin Shan | 3a949f4 | 2020-06-30 11:57:05 +1000 | [diff] [blame] | 2091 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu), |
Marc Zyngier | 7393b59 | 2012-09-17 19:27:09 +0100 | [diff] [blame] | 2092 | kvm_vcpu_get_hfar(vcpu), fault_ipa); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2093 | |
| 2094 | /* Check that the stage-2 fault is a translation, permission or access fault */
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2095 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM && |
| 2096 | fault_status != FSC_ACCESS) { |
Christoffer Dall | 0496daa5 | 2014-09-26 12:29:34 +0200 | [diff] [blame] | 2097 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", |
| 2098 | kvm_vcpu_trap_get_class(vcpu), |
| 2099 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), |
Gavin Shan | 3a949f4 | 2020-06-30 11:57:05 +1000 | [diff] [blame] | 2100 | (unsigned long)kvm_vcpu_get_esr(vcpu)); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2101 | return -EFAULT; |
| 2102 | } |
| 2103 | |
| 2104 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 2105 | |
| 2106 | gfn = fault_ipa >> PAGE_SHIFT; |
Christoffer Dall | 9804788 | 2014-08-19 12:18:04 +0200 | [diff] [blame] | 2107 | memslot = gfn_to_memslot(vcpu->kvm, gfn); |
| 2108 | hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); |
Ard Biesheuvel | a7d079c | 2014-09-09 11:27:09 +0100 | [diff] [blame] | 2109 | write_fault = kvm_is_write_fault(vcpu); |
Christoffer Dall | 9804788 | 2014-08-19 12:18:04 +0200 | [diff] [blame] | 2110 | if (kvm_is_error_hva(hva) || (write_fault && !writable)) { |
Will Deacon | 022c832 | 2020-07-29 11:28:21 +0100 | [diff] [blame] | 2111 | /* |
| 2112 | * The guest has put either its instructions or its page-tables |
| 2113 | * somewhere it shouldn't have. Userspace won't be able to do |
| 2114 | * anything about this (there's no syndrome for a start), so |
| 2115 | * re-inject the abort back into the guest. |
| 2116 | */ |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2117 | if (is_iabt) { |
Marc Zyngier | 6d674e2 | 2019-12-11 16:56:48 +0000 | [diff] [blame] | 2118 | ret = -ENOEXEC; |
| 2119 | goto out; |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2120 | } |
| 2121 | |
Will Deacon | 022c832 | 2020-07-29 11:28:21 +0100 | [diff] [blame] | 2122 | if (kvm_vcpu_dabt_iss1tw(vcpu)) { |
| 2123 | kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
| 2124 | ret = 1; |
| 2125 | goto out_unlock; |
| 2126 | } |
| 2127 | |
Marc Zyngier | cfe3950 | 2012-12-12 14:42:09 +0000 | [diff] [blame] | 2128 | /* |
Marc Zyngier | 57c841f | 2016-01-29 15:01:28 +0000 | [diff] [blame] | 2129 | * Check for a cache maintenance operation. Since we |
| 2130 | * ended up here, we know it is outside of any memory
| 2131 | * slot. But we can't find out if that is for a device, |
| 2132 | * or if the guest is just being stupid. The only thing |
| 2133 | * we know for sure is that this range cannot be cached. |
| 2134 | * |
| 2135 | * So let's assume that the guest is just being |
| 2136 | * cautious, and skip the instruction. |
| 2137 | */ |
Will Deacon | 54dc0d2 | 2020-07-29 11:28:20 +0100 | [diff] [blame] | 2138 | if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) { |
Marc Zyngier | 57c841f | 2016-01-29 15:01:28 +0000 | [diff] [blame] | 2139 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
| 2140 | ret = 1; |
| 2141 | goto out_unlock; |
| 2142 | } |
| 2143 | |
| 2144 | /* |
Marc Zyngier | cfe3950 | 2012-12-12 14:42:09 +0000 | [diff] [blame] | 2145 | * The IPA is reported as [MAX:12], so we need to |
| 2146 | * complement it with the bottom 12 bits from the |
| 2147 | * faulting VA. This is always 12 bits, irrespective |
| 2148 | * of the page size. |
| 2149 | */ |
| 2150 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); |
Tianjia Zhang | 74cc7e0 | 2020-06-23 21:14:15 +0800 | [diff] [blame] | 2151 | ret = io_mem_abort(vcpu, fault_ipa); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2152 | goto out_unlock; |
| 2153 | } |
| 2154 | |
Christoffer Dall | c3058d5 | 2014-10-10 12:14:29 +0200 | [diff] [blame] | 2155 | /* Userspace should not be able to register out-of-bounds IPAs */ |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 2156 | VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm)); |
Christoffer Dall | c3058d5 | 2014-10-10 12:14:29 +0200 | [diff] [blame] | 2157 | |
Marc Zyngier | aeda913 | 2015-03-12 18:16:52 +0000 | [diff] [blame] | 2158 | if (fault_status == FSC_ACCESS) { |
| 2159 | handle_access_fault(vcpu, fault_ipa); |
| 2160 | ret = 1; |
| 2161 | goto out_unlock; |
| 2162 | } |
| 2163 | |
Christoffer Dall | 9804788 | 2014-08-19 12:18:04 +0200 | [diff] [blame] | 2164 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2165 | if (ret == 0) |
| 2166 | ret = 1; |
Marc Zyngier | 6d674e2 | 2019-12-11 16:56:48 +0000 | [diff] [blame] | 2167 | out: |
| 2168 | if (ret == -ENOEXEC) { |
| 2169 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
| 2170 | ret = 1; |
| 2171 | } |
Christoffer Dall | 94f8e64 | 2013-01-20 18:28:12 -0500 | [diff] [blame] | 2172 | out_unlock: |
| 2173 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
| 2174 | return ret; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2175 | } |
| 2176 | |
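/*
 * Apply @handler to every guest physical range backing the [start, end) hva
 * interval: walk all memslots, clamp the hva range to each slot, translate
 * it to a gpa range and OR the handlers' return values together.
 */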
Marc Zyngier | 1d2ebac | 2015-03-12 18:16:50 +0000 | [diff] [blame] | 2177 | static int handle_hva_to_gpa(struct kvm *kvm, |
| 2178 | unsigned long start, |
| 2179 | unsigned long end, |
| 2180 | int (*handler)(struct kvm *kvm, |
Suzuki K Poulose | 056aad6 | 2017-03-20 18:26:42 +0000 | [diff] [blame] | 2181 | gpa_t gpa, u64 size, |
| 2182 | void *data), |
Marc Zyngier | 1d2ebac | 2015-03-12 18:16:50 +0000 | [diff] [blame] | 2183 | void *data) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2184 | { |
| 2185 | struct kvm_memslots *slots; |
| 2186 | struct kvm_memory_slot *memslot; |
Marc Zyngier | 1d2ebac | 2015-03-12 18:16:50 +0000 | [diff] [blame] | 2187 | int ret = 0; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2188 | |
| 2189 | slots = kvm_memslots(kvm); |
| 2190 | |
| 2191 | /* we only care about the pages that the guest sees */ |
| 2192 | kvm_for_each_memslot(memslot, slots) { |
| 2193 | unsigned long hva_start, hva_end; |
Suzuki K Poulose | 056aad6 | 2017-03-20 18:26:42 +0000 | [diff] [blame] | 2194 | gfn_t gpa; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2195 | |
| 2196 | hva_start = max(start, memslot->userspace_addr); |
| 2197 | hva_end = min(end, memslot->userspace_addr + |
| 2198 | (memslot->npages << PAGE_SHIFT)); |
| 2199 | if (hva_start >= hva_end) |
| 2200 | continue; |
| 2201 | |
Suzuki K Poulose | 056aad6 | 2017-03-20 18:26:42 +0000 | [diff] [blame] | 2202 | gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT; |
| 2203 | ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2204 | } |
Marc Zyngier | 1d2ebac | 2015-03-12 18:16:50 +0000 | [diff] [blame] | 2205 | |
| 2206 | return ret; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2207 | } |
| 2208 | |
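/* MMU notifier unmap handler: tear down all stage-2 mappings in the gpa range. */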
Suzuki K Poulose | 056aad6 | 2017-03-20 18:26:42 +0000 | [diff] [blame] | 2209 | static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2210 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2211 | unmap_stage2_range(&kvm->arch.mmu, gpa, size); |
Marc Zyngier | 1d2ebac | 2015-03-12 18:16:50 +0000 | [diff] [blame] | 2212 | return 0; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2213 | } |
| 2214 | |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2215 | int kvm_unmap_hva_range(struct kvm *kvm, |
Will Deacon | fdfe7cb | 2020-08-11 11:27:24 +0100 | [diff] [blame^] | 2216 | unsigned long start, unsigned long end, unsigned flags) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2217 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2218 | if (!kvm->arch.mmu.pgd) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2219 | return 0; |
| 2220 | |
| 2221 | trace_kvm_unmap_hva_range(start, end); |
| 2222 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); |
| 2223 | return 0; |
| 2224 | } |
| 2225 | |
Suzuki K Poulose | 056aad6 | 2017-03-20 18:26:42 +0000 | [diff] [blame] | 2226 | static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2227 | { |
| 2228 | pte_t *pte = (pte_t *)data; |
| 2229 | |
Suzuki K Poulose | 056aad6 | 2017-03-20 18:26:42 +0000 | [diff] [blame] | 2230 | WARN_ON(size != PAGE_SIZE); |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 2231 | /* |
| 2232 | * We can always call stage2_set_pte with the KVM_S2PTE_FLAG_LOGGING_ACTIVE
| 2233 | * flag clear because MMU notifiers will have unmapped a huge PMD before |
| 2234 | * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and |
| 2235 | * therefore stage2_set_pte() never needs to clear out a huge PMD |
| 2236 | * through this calling path. |
| 2237 | */ |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2238 | stage2_set_pte(&kvm->arch.mmu, NULL, gpa, pte, 0); |
Marc Zyngier | 1d2ebac | 2015-03-12 18:16:50 +0000 | [diff] [blame] | 2239 | return 0; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2240 | } |
| 2241 | |
| 2242 | |
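/*
 * MMU notifier ->change_pte() hook: the host PTE backing @hva has changed,
 * so clean the new page to the PoC and install a matching stage-2 PTE.
 */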
Lan Tianyu | 748c0e3 | 2018-12-06 21:21:10 +0800 | [diff] [blame] | 2243 | int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2244 | { |
| 2245 | unsigned long end = hva + PAGE_SIZE; |
Marc Zyngier | 694556d | 2018-08-23 09:58:27 +0100 | [diff] [blame] | 2246 | kvm_pfn_t pfn = pte_pfn(pte); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2247 | pte_t stage2_pte; |
| 2248 | |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2249 | if (!kvm->arch.mmu.pgd) |
Lan Tianyu | 748c0e3 | 2018-12-06 21:21:10 +0800 | [diff] [blame] | 2250 | return 0; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2251 | |
| 2252 | trace_kvm_set_spte_hva(hva); |
Marc Zyngier | 694556d | 2018-08-23 09:58:27 +0100 | [diff] [blame] | 2253 | |
| 2254 | /* |
| 2255 | * We've moved a page around, probably through CoW, so let's treat it |
| 2256 | * just like a translation fault and clean the cache to the PoC. |
| 2257 | */ |
| 2258 | clean_dcache_guest_page(pfn, PAGE_SIZE); |
Punit Agrawal | f8df733 | 2018-12-11 17:10:36 +0000 | [diff] [blame] | 2259 | stage2_pte = kvm_pfn_pte(pfn, PAGE_S2); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2260 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); |
Lan Tianyu | 748c0e3 | 2018-12-06 21:21:10 +0800 | [diff] [blame] | 2261 | |
| 2262 | return 0; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2263 | } |
| 2264 | |
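/*
 * Age a stage-2 mapping: locate the leaf entry (PUD, PMD or PTE) covering
 * @gpa and test-and-clear its access flag, reporting whether it was set.
 */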
Suzuki K Poulose | 056aad6 | 2017-03-20 18:26:42 +0000 | [diff] [blame] | 2265 | static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2266 | { |
Punit Agrawal | 35a6396 | 2018-12-11 17:10:40 +0000 | [diff] [blame] | 2267 | pud_t *pud; |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2268 | pmd_t *pmd; |
| 2269 | pte_t *pte; |
| 2270 | |
Punit Agrawal | 35a6396 | 2018-12-11 17:10:40 +0000 | [diff] [blame] | 2271 | WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2272 | if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte)) |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2273 | return 0; |
| 2274 | |
Punit Agrawal | 35a6396 | 2018-12-11 17:10:40 +0000 | [diff] [blame] | 2275 | if (pud) |
| 2276 | return stage2_pudp_test_and_clear_young(pud); |
| 2277 | else if (pmd) |
Catalin Marinas | 0648505 | 2016-04-13 17:57:37 +0100 | [diff] [blame] | 2278 | return stage2_pmdp_test_and_clear_young(pmd); |
Punit Agrawal | 35a6396 | 2018-12-11 17:10:40 +0000 | [diff] [blame] | 2279 | else |
| 2280 | return stage2_ptep_test_and_clear_young(pte); |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2281 | } |
| 2282 | |
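/* Check the access flag of the stage-2 leaf entry covering @gpa without clearing it. */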
Suzuki K Poulose | 056aad6 | 2017-03-20 18:26:42 +0000 | [diff] [blame] | 2283 | static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2284 | { |
Punit Agrawal | 35a6396 | 2018-12-11 17:10:40 +0000 | [diff] [blame] | 2285 | pud_t *pud; |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2286 | pmd_t *pmd; |
| 2287 | pte_t *pte; |
| 2288 | |
Punit Agrawal | 35a6396 | 2018-12-11 17:10:40 +0000 | [diff] [blame] | 2289 | WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2290 | if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte)) |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2291 | return 0; |
| 2292 | |
Punit Agrawal | 35a6396 | 2018-12-11 17:10:40 +0000 | [diff] [blame] | 2293 | if (pud) |
| 2294 | return kvm_s2pud_young(*pud); |
| 2295 | else if (pmd) |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2296 | return pmd_young(*pmd); |
Punit Agrawal | 35a6396 | 2018-12-11 17:10:40 +0000 | [diff] [blame] | 2297 | else |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2298 | return pte_young(*pte); |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2299 | } |
| 2300 | |
| 2301 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) |
| 2302 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2303 | if (!kvm->arch.mmu.pgd) |
Suzuki K Poulose | 7e5a672 | 2017-07-05 09:57:00 +0100 | [diff] [blame] | 2304 | return 0; |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2305 | trace_kvm_age_hva(start, end); |
| 2306 | return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); |
| 2307 | } |
| 2308 | |
| 2309 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) |
| 2310 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2311 | if (!kvm->arch.mmu.pgd) |
Suzuki K Poulose | 7e5a672 | 2017-07-05 09:57:00 +0100 | [diff] [blame] | 2312 | return 0; |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2313 | trace_kvm_test_age_hva(hva); |
Gavin Shan | cf2d23e | 2020-01-21 16:56:59 +1100 | [diff] [blame] | 2314 | return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, |
| 2315 | kvm_test_age_hva_handler, NULL); |
Marc Zyngier | 35307b9 | 2015-03-12 18:16:51 +0000 | [diff] [blame] | 2316 | } |
| 2317 | |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2318 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) |
| 2319 | { |
Sean Christopherson | c1a33ae | 2020-07-02 19:35:42 -0700 | [diff] [blame] | 2320 | kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2321 | } |
| 2322 | |
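/*
 * Return the physical address of the hyp PGD to be installed in HTTBR: the
 * merged PGD if the extended idmap is in use, the regular hyp PGD otherwise.
 */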
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2323 | phys_addr_t kvm_mmu_get_httbr(void) |
| 2324 | { |
Ard Biesheuvel | e4c5a68 | 2015-03-19 16:42:28 +0000 | [diff] [blame] | 2325 | if (__kvm_cpu_uses_extended_idmap()) |
| 2326 | return virt_to_phys(merged_hyp_pgd); |
| 2327 | else |
| 2328 | return virt_to_phys(hyp_pgd); |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2329 | } |
| 2330 | |
Marc Zyngier | 5a677ce | 2013-04-12 19:12:06 +0100 | [diff] [blame] | 2331 | phys_addr_t kvm_get_idmap_vector(void) |
| 2332 | { |
| 2333 | return hyp_idmap_vector; |
| 2334 | } |
| 2335 | |
Marc Zyngier | 0535a3e | 2016-06-30 18:40:43 +0100 | [diff] [blame] | 2336 | static int kvm_map_idmap_text(pgd_t *pgd) |
| 2337 | { |
| 2338 | int err; |
| 2339 | |
| 2340 | /* Create the idmap in the boot page tables */ |
Kristina Martsenko | 98732d1 | 2018-01-15 15:23:49 +0000 | [diff] [blame] | 2341 | err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(), |
Marc Zyngier | 0535a3e | 2016-06-30 18:40:43 +0100 | [diff] [blame] | 2342 | hyp_idmap_start, hyp_idmap_end, |
| 2343 | __phys_to_pfn(hyp_idmap_start), |
| 2344 | PAGE_HYP_EXEC); |
| 2345 | if (err) |
| 2346 | kvm_err("Failed to idmap %lx-%lx\n", |
| 2347 | hyp_idmap_start, hyp_idmap_end); |
| 2348 | |
| 2349 | return err; |
| 2350 | } |
| 2351 | |
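/*
 * One-time initialisation of the hyp page tables: compute the idmap text
 * extents, check that the idmap does not intersect the hyp VA range,
 * allocate the hyp PGD(s) and map the idmap text so CPUs can be brought
 * into hyp mode.
 */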
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2352 | int kvm_mmu_init(void) |
| 2353 | { |
Marc Zyngier | 2fb4105 | 2013-04-12 19:12:03 +0100 | [diff] [blame] | 2354 | int err; |
| 2355 | |
Andrew Scull | 0a78791 | 2020-05-19 11:40:36 +0100 | [diff] [blame] | 2356 | hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start); |
Marc Zyngier | 46fef15 | 2018-03-12 14:25:10 +0000 | [diff] [blame] | 2357 | hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE); |
Andrew Scull | 0a78791 | 2020-05-19 11:40:36 +0100 | [diff] [blame] | 2358 | hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end); |
Marc Zyngier | 46fef15 | 2018-03-12 14:25:10 +0000 | [diff] [blame] | 2359 | hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE); |
Andrew Scull | 0a78791 | 2020-05-19 11:40:36 +0100 | [diff] [blame] | 2360 | hyp_idmap_vector = __pa_symbol(__kvm_hyp_init); |
Marc Zyngier | 5a677ce | 2013-04-12 19:12:06 +0100 | [diff] [blame] | 2361 | |
Ard Biesheuvel | 06f75a1 | 2015-03-19 16:42:26 +0000 | [diff] [blame] | 2362 | /* |
| 2363 | * We rely on the linker script to ensure at build time that the HYP |
| 2364 | * init code does not cross a page boundary. |
| 2365 | */ |
| 2366 | BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); |
Marc Zyngier | 5a677ce | 2013-04-12 19:12:06 +0100 | [diff] [blame] | 2367 | |
Marc Zyngier | b4ef049 | 2017-12-03 20:04:51 +0000 | [diff] [blame] | 2368 | kvm_debug("IDMAP page: %lx\n", hyp_idmap_start); |
| 2369 | kvm_debug("HYP VA range: %lx:%lx\n", |
| 2370 | kern_hyp_va(PAGE_OFFSET), |
| 2371 | kern_hyp_va((unsigned long)high_memory - 1)); |
Marc Zyngier | eac378a | 2016-06-30 18:40:50 +0100 | [diff] [blame] | 2372 | |
Marc Zyngier | 6c41a41 | 2016-06-30 18:40:51 +0100 | [diff] [blame] | 2373 | if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && |
Marc Zyngier | ed57cac | 2017-12-03 18:22:49 +0000 | [diff] [blame] | 2374 | hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) && |
Marc Zyngier | d2896d4 | 2016-08-22 09:01:17 +0100 | [diff] [blame] | 2375 | hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) { |
Marc Zyngier | eac378a | 2016-06-30 18:40:50 +0100 | [diff] [blame] | 2376 | /* |
| 2377 | * The idmap page is intersecting with the VA space, |
| 2378 | * it is not safe to continue further. |
| 2379 | */ |
| 2380 | kvm_err("IDMAP intersecting with HYP VA, unable to continue\n"); |
| 2381 | err = -EINVAL; |
| 2382 | goto out; |
| 2383 | } |
| 2384 | |
Christoffer Dall | 38f791a | 2014-10-10 12:14:28 +0200 | [diff] [blame] | 2385 | hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); |
Marc Zyngier | 0535a3e | 2016-06-30 18:40:43 +0100 | [diff] [blame] | 2386 | if (!hyp_pgd) { |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2387 | kvm_err("Hyp mode PGD not allocated\n"); |
Marc Zyngier | 2fb4105 | 2013-04-12 19:12:03 +0100 | [diff] [blame] | 2388 | err = -ENOMEM; |
| 2389 | goto out; |
| 2390 | } |
| 2391 | |
Ard Biesheuvel | e4c5a68 | 2015-03-19 16:42:28 +0000 | [diff] [blame] | 2392 | if (__kvm_cpu_uses_extended_idmap()) { |
Marc Zyngier | 0535a3e | 2016-06-30 18:40:43 +0100 | [diff] [blame] | 2393 | boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
| 2394 | hyp_pgd_order); |
| 2395 | if (!boot_hyp_pgd) { |
| 2396 | kvm_err("Hyp boot PGD not allocated\n"); |
| 2397 | err = -ENOMEM; |
| 2398 | goto out; |
| 2399 | } |
| 2400 | |
| 2401 | err = kvm_map_idmap_text(boot_hyp_pgd); |
| 2402 | if (err) |
| 2403 | goto out; |
| 2404 | |
Ard Biesheuvel | e4c5a68 | 2015-03-19 16:42:28 +0000 | [diff] [blame] | 2405 | merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); |
| 2406 | if (!merged_hyp_pgd) { |
| 2407 | kvm_err("Failed to allocate extra HYP pgd\n"); |
| 2408 | goto out; |
| 2409 | } |
| 2410 | __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd, |
| 2411 | hyp_idmap_start); |
Marc Zyngier | 0535a3e | 2016-06-30 18:40:43 +0100 | [diff] [blame] | 2412 | } else { |
| 2413 | err = kvm_map_idmap_text(hyp_pgd); |
| 2414 | if (err) |
| 2415 | goto out; |
Marc Zyngier | 5a677ce | 2013-04-12 19:12:06 +0100 | [diff] [blame] | 2416 | } |
| 2417 | |
Marc Zyngier | e3f019b | 2017-12-04 17:04:38 +0000 | [diff] [blame] | 2418 | io_map_base = hyp_idmap_start; |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 2419 | return 0; |
Marc Zyngier | 2fb4105 | 2013-04-12 19:12:03 +0100 | [diff] [blame] | 2420 | out: |
Marc Zyngier | 4f72827 | 2013-04-12 19:12:05 +0100 | [diff] [blame] | 2421 | free_hyp_pgds(); |
Marc Zyngier | 2fb4105 | 2013-04-12 19:12:03 +0100 | [diff] [blame] | 2422 | return err; |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 2423 | } |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 2424 | |
| 2425 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 2426 | const struct kvm_userspace_memory_region *mem, |
Sean Christopherson | 9d4c197 | 2020-02-18 13:07:24 -0800 | [diff] [blame] | 2427 | struct kvm_memory_slot *old, |
Paolo Bonzini | f36f3f2 | 2015-05-18 13:20:23 +0200 | [diff] [blame] | 2428 | const struct kvm_memory_slot *new, |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 2429 | enum kvm_mr_change change) |
| 2430 | { |
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 2431 | /* |
| 2432 | * At this point the memslot has been committed and there is an
Fuad Tabba | 656012c | 2020-04-01 15:03:10 +0100 | [diff] [blame] | 2433 | * allocated dirty_bitmap[], so dirty pages will be tracked while the
Mario Smarduch | c647355 | 2015-01-15 15:58:56 -0800 | [diff] [blame] | 2434 | * memory slot is write protected.
| 2435 | */ |
Keqian Zhu | c862626 | 2020-04-13 20:20:23 +0800 | [diff] [blame] | 2436 | if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { |
| 2437 | /* |
| 2438 | * If we're with initial-all-set, we don't need to write |
| 2439 | * protect any pages because they're all reported as dirty. |
| 2440 | * Huge pages and normal pages will be write protect gradually. |
| 2441 | */ |
| 2442 | if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) { |
| 2443 | kvm_mmu_wp_memory_region(kvm, mem->slot); |
| 2444 | } |
| 2445 | } |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 2446 | } |
| 2447 | |
| 2448 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
| 2449 | struct kvm_memory_slot *memslot, |
Paolo Bonzini | 09170a4 | 2015-05-18 13:59:39 +0200 | [diff] [blame] | 2450 | const struct kvm_userspace_memory_region *mem, |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 2451 | enum kvm_mr_change change) |
| 2452 | { |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 2453 | hva_t hva = mem->userspace_addr; |
| 2454 | hva_t reg_end = hva + mem->memory_size; |
| 2455 | bool writable = !(mem->flags & KVM_MEM_READONLY); |
| 2456 | int ret = 0; |
| 2457 | |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 2458 | if (change != KVM_MR_CREATE && change != KVM_MR_MOVE && |
| 2459 | change != KVM_MR_FLAGS_ONLY) |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 2460 | return 0; |
| 2461 | |
| 2462 | /* |
Christoffer Dall | c3058d5 | 2014-10-10 12:14:29 +0200 | [diff] [blame] | 2463 | * Prevent userspace from creating a memory region outside of the IPA |
| 2464 | * space addressable by the KVM guest.
| 2465 | */ |
| 2466 | if (memslot->base_gfn + memslot->npages >= |
Suzuki K Poulose | e55cac5 | 2018-09-26 17:32:44 +0100 | [diff] [blame] | 2467 | (kvm_phys_size(kvm) >> PAGE_SHIFT)) |
Christoffer Dall | c3058d5 | 2014-10-10 12:14:29 +0200 | [diff] [blame] | 2468 | return -EFAULT; |
| 2469 | |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 2470 | mmap_read_lock(current->mm); |
Christoffer Dall | c3058d5 | 2014-10-10 12:14:29 +0200 | [diff] [blame] | 2471 | /* |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 2472 | * A memory region could potentially cover multiple VMAs, and any holes |
| 2473 | * between them, so iterate over all of them to find out if we can map |
| 2474 | * any of them right now. |
| 2475 | * |
| 2476 | * +--------------------------------------------+ |
| 2477 | * +---------------+----------------+ +----------------+ |
| 2478 | * | : VMA 1 | VMA 2 | | VMA 3 : | |
| 2479 | * +---------------+----------------+ +----------------+ |
| 2480 | * | memory region | |
| 2481 | * +--------------------------------------------+ |
| 2482 | */ |
| 2483 | do { |
| 2484 | struct vm_area_struct *vma = find_vma(current->mm, hva); |
| 2485 | hva_t vm_start, vm_end; |
| 2486 | |
| 2487 | if (!vma || vma->vm_start >= reg_end) |
| 2488 | break; |
| 2489 | |
| 2490 | /* |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 2491 | * Take the intersection of this VMA with the memory region |
| 2492 | */ |
| 2493 | vm_start = max(hva, vma->vm_start); |
| 2494 | vm_end = min(reg_end, vma->vm_end); |
| 2495 | |
| 2496 | if (vma->vm_flags & VM_PFNMAP) { |
| 2497 | gpa_t gpa = mem->guest_phys_addr + |
| 2498 | (vm_start - mem->userspace_addr); |
Marek Majtyka | ca09f02 | 2015-09-16 12:04:55 +0200 | [diff] [blame] | 2499 | phys_addr_t pa; |
| 2500 | |
| 2501 | pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; |
| 2502 | pa += vm_start - vma->vm_start; |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 2503 | |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 2504 | /* IO region dirty page logging not allowed */ |
Marc Zyngier | 72f3104 | 2017-03-16 18:20:50 +0000 | [diff] [blame] | 2505 | if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { |
| 2506 | ret = -EINVAL; |
| 2507 | goto out; |
| 2508 | } |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 2509 | |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 2510 | ret = kvm_phys_addr_ioremap(kvm, gpa, pa, |
| 2511 | vm_end - vm_start, |
| 2512 | writable); |
| 2513 | if (ret) |
| 2514 | break; |
| 2515 | } |
| 2516 | hva = vm_end; |
| 2517 | } while (hva < reg_end); |
| 2518 | |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 2519 | if (change == KVM_MR_FLAGS_ONLY) |
Marc Zyngier | 72f3104 | 2017-03-16 18:20:50 +0000 | [diff] [blame] | 2520 | goto out; |
Mario Smarduch | 15a49a4 | 2015-01-15 15:58:58 -0800 | [diff] [blame] | 2521 | |
Ard Biesheuvel | 849260c | 2014-11-17 14:58:53 +0000 | [diff] [blame] | 2522 | spin_lock(&kvm->mmu_lock); |
| 2523 | if (ret) |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2524 | unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size); |
Ard Biesheuvel | 849260c | 2014-11-17 14:58:53 +0000 | [diff] [blame] | 2525 | else |
| 2526 | stage2_flush_memslot(kvm, memslot); |
| 2527 | spin_unlock(&kvm->mmu_lock); |
Marc Zyngier | 72f3104 | 2017-03-16 18:20:50 +0000 | [diff] [blame] | 2528 | out: |
Michel Lespinasse | 89154dd | 2020-06-08 21:33:29 -0700 | [diff] [blame] | 2529 | mmap_read_unlock(current->mm); |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 2530 | return ret; |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 2531 | } |
| 2532 | |
Sean Christopherson | e96c81e | 2020-02-18 13:07:27 -0800 | [diff] [blame] | 2533 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 2534 | { |
| 2535 | } |
| 2536 | |
Sean Christopherson | 1524825 | 2019-02-05 12:54:17 -0800 | [diff] [blame] | 2537 | void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 2538 | { |
| 2539 | } |
| 2540 | |
| 2541 | void kvm_arch_flush_shadow_all(struct kvm *kvm) |
| 2542 | { |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2543 | kvm_free_stage2_pgd(&kvm->arch.mmu); |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 2544 | } |
| 2545 | |
| 2546 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, |
| 2547 | struct kvm_memory_slot *slot) |
| 2548 | { |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 2549 | gpa_t gpa = slot->base_gfn << PAGE_SHIFT; |
| 2550 | phys_addr_t size = slot->npages << PAGE_SHIFT; |
| 2551 | |
| 2552 | spin_lock(&kvm->mmu_lock); |
Christoffer Dall | a0e50aa | 2019-01-04 21:09:05 +0100 | [diff] [blame] | 2553 | unmap_stage2_range(&kvm->arch.mmu, gpa, size); |
Ard Biesheuvel | 8eef912 | 2014-10-10 17:00:32 +0200 | [diff] [blame] | 2554 | spin_unlock(&kvm->mmu_lock); |
Eric Auger | df6ce24 | 2014-06-06 11:10:23 +0200 | [diff] [blame] | 2555 | } |
Marc Zyngier | 3c1e716 | 2014-12-19 16:05:31 +0000 | [diff] [blame] | 2556 | |
| 2557 | /* |
| 2558 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). |
| 2559 | * |
| 2560 | * Main problems: |
| 2561 | * - S/W ops are local to a CPU (not broadcast) |
| 2562 | * - We have line migration behind our back (speculation) |
| 2563 | * - System caches don't support S/W at all (damn!) |
| 2564 | * |
| 2565 | * In the face of the above, the best we can do is to try and convert |
| 2566 | * S/W ops to VA ops. Because the guest is not allowed to infer the |
| 2567 | * S/W to PA mapping, it can only use S/W to nuke the whole cache, |
| 2568 | * which is a rather good thing for us. |
| 2569 | * |
| 2570 | * Also, it is only used when turning caches on/off ("The expected |
| 2571 | * usage of the cache maintenance instructions that operate by set/way |
| 2572 | * is associated with the cache maintenance instructions associated |
| 2573 | * with the powerdown and powerup of caches, if this is required by |
| 2574 | * the implementation."). |
| 2575 | * |
| 2576 | * We use the following policy: |
| 2577 | * |
| 2578 | * - If we trap a S/W operation, we enable VM trapping to detect |
| 2579 | * caches being turned on/off, and do a full clean. |
| 2580 | * |
| 2581 | * - We flush the caches on both caches being turned on and off. |
| 2582 | * |
| 2583 | * - Once the caches are enabled, we stop trapping VM ops. |
| 2584 | */ |
| 2585 | void kvm_set_way_flush(struct kvm_vcpu *vcpu) |
| 2586 | { |
Christoffer Dall | 3df59d8 | 2017-08-03 12:09:05 +0200 | [diff] [blame] | 2587 | unsigned long hcr = *vcpu_hcr(vcpu); |
Marc Zyngier | 3c1e716 | 2014-12-19 16:05:31 +0000 | [diff] [blame] | 2588 | |
| 2589 | /* |
| 2590 | * If this is the first time we do a S/W operation |
| 2591 | * (i.e. HCR_TVM not set) flush the whole memory, and enable
| 2592 | * VM trapping. |
| 2593 | * |
| 2594 | * Otherwise, rely on the VM trapping to wait for the MMU + |
| 2595 | * Caches to be turned off. At that point, we'll be able to |
| 2596 | * clean the caches again. |
| 2597 | */ |
| 2598 | if (!(hcr & HCR_TVM)) { |
| 2599 | trace_kvm_set_way_flush(*vcpu_pc(vcpu), |
| 2600 | vcpu_has_cache_enabled(vcpu)); |
| 2601 | stage2_flush_vm(vcpu->kvm); |
Christoffer Dall | 3df59d8 | 2017-08-03 12:09:05 +0200 | [diff] [blame] | 2602 | *vcpu_hcr(vcpu) = hcr | HCR_TVM; |
Marc Zyngier | 3c1e716 | 2014-12-19 16:05:31 +0000 | [diff] [blame] | 2603 | } |
| 2604 | } |
| 2605 | |
| 2606 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) |
| 2607 | { |
| 2608 | bool now_enabled = vcpu_has_cache_enabled(vcpu); |
| 2609 | |
| 2610 | /* |
| 2611 | * If switching the MMU+caches on, need to invalidate the caches. |
| 2612 | * If switching it off, need to clean the caches. |
| 2613 | * Clean + invalidate does the trick always. |
| 2614 | */ |
| 2615 | if (now_enabled != was_enabled) |
| 2616 | stage2_flush_vm(vcpu->kvm); |
| 2617 | |
| 2618 | /* Caches are now on, stop trapping VM ops (until a S/W op) */ |
| 2619 | if (now_enabled) |
Christoffer Dall | 3df59d8 | 2017-08-03 12:09:05 +0200 | [diff] [blame] | 2620 | *vcpu_hcr(vcpu) &= ~HCR_TVM; |
Marc Zyngier | 3c1e716 | 2014-12-19 16:05:31 +0000 | [diff] [blame] | 2621 | |
| 2622 | trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled); |
| 2623 | } |