// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "mmu_internal.h"
#include "tdp_mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "cpuid.h"
#include "spte.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
#include <linux/kthread.h>

#include <asm/page.h>
#include <asm/memtype.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/set_memory.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
#include "trace.h"

extern bool itlb_multihit_kvm_mitigation;

int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops nx_huge_pages_ops = {
	.set = set_nx_huge_pages,
	.get = param_get_bool,
};

static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
	.set = set_nx_huge_pages_recovery_ratio,
	.get = param_get_uint,
};

module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
		&nx_huge_pages_recovery_ratio, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");

static bool __read_mostly force_flush_and_sync_on_reuse;
module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);

/*
 * Setting this variable to true enables Two-Dimensional Paging (TDP), where
 * the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports this, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

static int max_huge_page_level __read_mostly;
static int max_tdp_level __read_mostly;

enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
};

#ifdef MMU_DEBUG
bool dbg = 0;
module_param(dbg, bool, 0644);
#endif

#define PTE_PREFETCH_NUM		8

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#include <trace/events/kvm.h>

/* make pte_list_desc fit well in a cache line */
#define PTE_LIST_EXT 3

struct pte_list_desc {
	u64 *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	u64 *sptep;
	int level;
	unsigned index;
};

#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)	\
	for (shadow_walk_init_using_root(&(_walker), (_vcpu),		\
					 (_root), (_addr));		\
	     shadow_walk_okay(&(_walker));				\
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
	     shadow_walk_okay(&(_walker)) &&				\
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
	     __shadow_walk_next(&(_walker), spte))

static struct kmem_cache *pte_list_desc_cache;
struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);

#define CREATE_TRACE_POINTS
#include "mmutrace.h"


static inline bool kvm_available_flush_tlb_with_range(void)
{
	return kvm_x86_ops.tlb_remote_flush_with_range;
}

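/*
 * Flush TLBs for the given range of guest pages via the range-based hook
 * when it is available, otherwise fall back to a full remote TLB flush.
 */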
static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	int ret = -ENOTSUPP;

	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);

	if (ret)
		kvm_flush_remote_tlbs(kvm);
}

void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
		u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range;

	range.start_gfn = start_gfn;
	range.pages = pages;

	kvm_flush_remote_tlbs_with_range(kvm, &range);
}

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
			   unsigned int access)
{
	u64 spte = make_mmio_spte(vcpu, gfn, access);

	trace_mark_mmio_spte(sptep, gfn, spte);
	mmu_spte_set(sptep, spte);
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
	       & shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
}

static unsigned get_mmio_spte_access(u64 spte)
{
	return spte & shadow_mmio_access_mask;
}

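/*
 * An MMIO SPTE is only honoured if its embedded generation matches the
 * current memslots generation (and no memslot update is in progress).
 */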
static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
	u64 kvm_gen, spte_gen, gen;

	gen = kvm_vcpu_memslots(vcpu)->generation;
	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return false;

	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
	spte_gen = get_mmio_spte_generation(spte);

	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
	return likely(kvm_gen == spte_gen);
}

static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception)
{
	/* Check if guest physical address doesn't exceed guest maximum */
	if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
		exception->error_code |= PFERR_RSVD_MASK;
		return UNMAPPED_GVA;
	}

	return gpa;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}

static u64 __get_spte_lockless(u64 *sptep)
{
	return READ_ONCE(*sptep);
}
#else
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};
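
/*
 * Without X86_64, a 64-bit SPTE cannot be read or written in a single
 * access, so the helpers below operate on the two 32-bit halves and rely
 * on memory barriers plus clear_spte_count to detect torn reads.
 */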
static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, we should store
	 * the high bits first and only then set the present bit, so the CPU
	 * cannot fetch the spte while we are still setting it.
	 */
	smp_wmb();

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * the present bit first so that a vCPU cannot fetch the old high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);

	return orig.spte;
}

/*
 * The idea of reading the spte in this lightweight, lockless way on x86_32
 * comes from gup_get_pte (mm/gup.c).
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running outside of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present sptes),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
#endif

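/*
 * Returns true if the SPTE has bits that may be changed by hardware or by
 * lockless paths while it is mapped, in which case it must be updated with
 * an atomic exchange rather than a plain write.
 */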
static bool spte_has_volatile_bits(u64 spte)
{
	if (!is_shadow_present_pte(spte))
		return false;

	/*
	 * Always update the spte atomically if it can be updated outside of
	 * the mmu-lock: this ensures the dirty bit is not lost and gives us a
	 * stable is_writable_pte(), so that a needed TLB flush is not missed.
	 */
	if (spte_can_locklessly_be_made_writable(spte) ||
	    is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if ((spte & shadow_accessed_mask) == 0 ||
		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
			return true;
	}

	return false;
}

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
 */
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;

	WARN_ON(!is_shadow_present_pte(new_spte));

	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
		return old_spte;
	}

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, new_spte);
	else
		old_spte = __update_clear_spte_slow(sptep, new_spte);

	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

	return old_spte;
}

/* Rules for using mmu_spte_update:
 * Update the state bits; the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one, we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might still be cached in a CPU's TLB; the return value indicates
 * this case.
 *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	bool flush = false;
	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

	if (!is_shadow_present_pte(old_spte))
		return false;

	/*
	 * Updating the spte outside of the mmu-lock is safe, since
	 * we always update it atomically; see the comments in
	 * spte_has_volatile_bits().
	 */
	if (spte_can_locklessly_be_made_writable(old_spte) &&
	      !is_writable_pte(new_spte))
		flush = true;

	/*
	 * Flush the TLB when accessed/dirty states are changed in the page
	 * tables, to guarantee consistency between the TLB and page tables.
	 */

	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	}

	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
	}

	return flush;
}

/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent and tracks the
 * state bits; it is used to clear a last-level sptep.
 * Returns non-zero if the PTE was previously valid.
 */
static int mmu_spte_clear_track_bits(u64 *sptep)
{
	kvm_pfn_t pfn;
	u64 old_spte = *sptep;

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, 0ull);
	else
		old_spte = __update_clear_spte_slow(sptep, 0ull);

	if (!is_shadow_present_pte(old_spte))
		return 0;

	pfn = spte_to_pfn(old_spte);

	/*
	 * KVM does not hold a refcount on the pages used by the KVM MMU;
	 * before reclaiming a page, it should first be unmapped from the MMU.
	 */
	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));

	if (is_accessed_spte(old_spte))
		kvm_set_pfn_accessed(pfn);

	if (is_dirty_spte(old_spte))
		kvm_set_pfn_dirty(pfn);

	return 1;
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear the spte without caring about the state bits of the
 * sptep; it is used to set the upper level spte.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
	__update_clear_spte_fast(sptep, 0ull);
}

static u64 mmu_spte_get_lockless(u64 *sptep)
{
	return __get_spte_lockless(sptep);
}

/* Restore an acc-track PTE back to a regular PTE */
static u64 restore_acc_track_spte(u64 spte)
{
	u64 new_spte = spte;
	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;

	WARN_ON_ONCE(spte_ad_enabled(spte));
	WARN_ON_ONCE(!is_access_track_spte(spte));

	new_spte &= ~shadow_acc_track_mask;
	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
	new_spte |= saved_bits;

	return new_spte;
}

/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{
	u64 spte = mmu_spte_get_lockless(sptep);

	if (!is_accessed_spte(spte))
		return false;

	if (spte_ad_enabled(spte)) {
		clear_bit((ffs(shadow_accessed_mask) - 1),
			  (unsigned long *)sptep);
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(spte))
			kvm_set_pfn_dirty(spte_to_pfn(spte));

		spte = mark_spte_for_access_track(spte);
		mmu_spte_update_no_track(sptep, spte);
	}

	return true;
}

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
	/*
	 * Prevent page table teardown by making any free-er wait during
	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
	 */
	local_irq_disable();

	/*
	 * Make sure a following spte read is not reordered ahead of the write
	 * to vcpu->mode.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of
	 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
	local_irq_enable();
}

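/*
 * Top up the per-vCPU object caches (pte_list_desc's, shadow pages, gfn
 * arrays and page headers) that later MMU code allocates from; the gfn
 * array cache is only needed when an indirect (shadowed) page may be used.
 */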
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
{
	int r;

	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
	if (r)
		return r;
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
				       PT64_ROOT_MAX_LEVEL);
	if (r)
		return r;
	if (maybe_indirect) {
		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
					       PT64_ROOT_MAX_LEVEL);
		if (r)
			return r;
	}
	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
					  PT64_ROOT_MAX_LEVEL);
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
}

static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}

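/*
 * For direct shadow pages the gfn of an entry is computed from sp->gfn and
 * the entry's index; indirect pages instead record each entry's gfn in
 * sp->gfns[].
 */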
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
	if (!sp->role.direct) {
		sp->gfns[index] = gfn;
		return;
	}

	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
		pr_err_ratelimited("gfn mismatch under direct page %llx "
				   "(expected %llx, got %llx)\n",
				   sp->gfn,
				   kvm_mmu_page_get_gfn(sp, index), gfn);
}

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
		const struct kvm_memory_slot *slot, int level)
{
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.lpage_info[level - 2][idx];
}

static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
					    gfn_t gfn, int count)
{
	struct kvm_lpage_info *linfo;
	int i;

	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->disallow_lpage += count;
		WARN_ON(linfo->disallow_lpage < 0);
	}
}

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, 1);
}

void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, -1);
}

static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages++;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);

	/* the non-leaf shadow pages are kept readonly. */
	if (sp->role.level > PG_LEVEL_4K)
		return kvm_slot_page_track_add_page(kvm, slot, gfn,
						    KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_disallow_lpage(slot, gfn);
}

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	if (sp->lpage_disallowed)
		return;

	++kvm->stat.nx_lpage_splits;
	list_add_tail(&sp->lpage_disallowed_link,
		      &kvm->arch.lpage_disallowed_mmu_pages);
	sp->lpage_disallowed = true;
}

static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages--;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (sp->role.level > PG_LEVEL_4K)
		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
						       KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_allow_lpage(slot, gfn);
}

void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	--kvm->stat.nx_lpage_splits;
	sp->lpage_disallowed = false;
	list_del(&sp->lpage_disallowed_link);
}

static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return NULL;
	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
		return NULL;

	return slot;
}

/*
 * About rmap_head encoding:
 *
 * If bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
 * pte_list_desc containing more mappings.
 */
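/*
 * Concretely, for a single mapping:
 *	rmap_head->val == (unsigned long)sptep;		(bit zero clear)
 * and once a second spte is added:
 *	rmap_head->val == (unsigned long)desc | 1;	(bit zero set)
 * where desc is a pte_list_desc holding up to PTE_LIST_EXT sptes plus a
 * 'more' pointer to the next descriptor, as built up by pte_list_add().
 */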
| 820 | |
| 821 | /* |
| 822 | * Returns the number of pointers in the rmap chain, not counting the new one. |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 823 | */ |
| 824 | static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 825 | struct kvm_rmap_head *rmap_head) |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 826 | { |
| 827 | struct pte_list_desc *desc; |
| 828 | int i, count = 0; |
| 829 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 830 | if (!rmap_head->val) { |
Stephen Zhang | 805a0f8 | 2021-01-27 10:08:45 +0800 | [diff] [blame] | 831 | rmap_printk("%p %llx 0->1\n", spte, *spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 832 | rmap_head->val = (unsigned long)spte; |
| 833 | } else if (!(rmap_head->val & 1)) { |
Stephen Zhang | 805a0f8 | 2021-01-27 10:08:45 +0800 | [diff] [blame] | 834 | rmap_printk("%p %llx 1->many\n", spte, *spte); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 835 | desc = mmu_alloc_pte_list_desc(vcpu); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 836 | desc->sptes[0] = (u64 *)rmap_head->val; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 837 | desc->sptes[1] = spte; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 838 | rmap_head->val = (unsigned long)desc | 1; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 839 | ++count; |
| 840 | } else { |
Stephen Zhang | 805a0f8 | 2021-01-27 10:08:45 +0800 | [diff] [blame] | 841 | rmap_printk("%p %llx many->many\n", spte, *spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 842 | desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); |
Li RongQing | c6c4f96 | 2020-09-27 16:44:57 +0800 | [diff] [blame] | 843 | while (desc->sptes[PTE_LIST_EXT-1]) { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 844 | count += PTE_LIST_EXT; |
Li RongQing | c6c4f96 | 2020-09-27 16:44:57 +0800 | [diff] [blame] | 845 | |
| 846 | if (!desc->more) { |
| 847 | desc->more = mmu_alloc_pte_list_desc(vcpu); |
| 848 | desc = desc->more; |
| 849 | break; |
| 850 | } |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 851 | desc = desc->more; |
| 852 | } |
| 853 | for (i = 0; desc->sptes[i]; ++i) |
| 854 | ++count; |
| 855 | desc->sptes[i] = spte; |
| 856 | } |
| 857 | return count; |
| 858 | } |
| 859 | |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 860 | static void |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 861 | pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, |
| 862 | struct pte_list_desc *desc, int i, |
| 863 | struct pte_list_desc *prev_desc) |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 864 | { |
| 865 | int j; |
| 866 | |
| 867 | for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j) |
| 868 | ; |
| 869 | desc->sptes[i] = desc->sptes[j]; |
| 870 | desc->sptes[j] = NULL; |
| 871 | if (j != 0) |
| 872 | return; |
| 873 | if (!prev_desc && !desc->more) |
Miaohe Lin | fe3c2b4 | 2019-12-05 11:40:16 +0800 | [diff] [blame] | 874 | rmap_head->val = 0; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 875 | else |
| 876 | if (prev_desc) |
| 877 | prev_desc->more = desc->more; |
| 878 | else |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 879 | rmap_head->val = (unsigned long)desc->more | 1; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 880 | mmu_free_pte_list_desc(desc); |
| 881 | } |
| 882 | |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 883 | static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 884 | { |
| 885 | struct pte_list_desc *desc; |
| 886 | struct pte_list_desc *prev_desc; |
| 887 | int i; |
| 888 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 889 | if (!rmap_head->val) { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 890 | pr_err("%s: %p 0->BUG\n", __func__, spte); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 891 | BUG(); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 892 | } else if (!(rmap_head->val & 1)) { |
Stephen Zhang | 805a0f8 | 2021-01-27 10:08:45 +0800 | [diff] [blame] | 893 | rmap_printk("%p 1->0\n", spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 894 | if ((u64 *)rmap_head->val != spte) { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 895 | pr_err("%s: %p 1->BUG\n", __func__, spte); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 896 | BUG(); |
| 897 | } |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 898 | rmap_head->val = 0; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 899 | } else { |
Stephen Zhang | 805a0f8 | 2021-01-27 10:08:45 +0800 | [diff] [blame] | 900 | rmap_printk("%p many->many\n", spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 901 | desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 902 | prev_desc = NULL; |
| 903 | while (desc) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 904 | for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 905 | if (desc->sptes[i] == spte) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 906 | pte_list_desc_remove_entry(rmap_head, |
| 907 | desc, i, prev_desc); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 908 | return; |
| 909 | } |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 910 | } |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 911 | prev_desc = desc; |
| 912 | desc = desc->more; |
| 913 | } |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 914 | pr_err("%s: %p many->many\n", __func__, spte); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 915 | BUG(); |
| 916 | } |
| 917 | } |
| 918 | |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 919 | static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep) |
| 920 | { |
| 921 | mmu_spte_clear_track_bits(sptep); |
| 922 | __pte_list_remove(sptep, rmap_head); |
| 923 | } |
| 924 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 925 | static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level, |
| 926 | struct kvm_memory_slot *slot) |
Takuya Yoshikawa | 9b9b149 | 2011-11-14 18:22:28 +0900 | [diff] [blame] | 927 | { |
Takuya Yoshikawa | 77d1130 | 2012-07-02 17:57:17 +0900 | [diff] [blame] | 928 | unsigned long idx; |
Takuya Yoshikawa | 9b9b149 | 2011-11-14 18:22:28 +0900 | [diff] [blame] | 929 | |
Takuya Yoshikawa | 77d1130 | 2012-07-02 17:57:17 +0900 | [diff] [blame] | 930 | idx = gfn_to_index(gfn, slot->base_gfn, level); |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 931 | return &slot->arch.rmap[level - PG_LEVEL_4K][idx]; |
Takuya Yoshikawa | 9b9b149 | 2011-11-14 18:22:28 +0900 | [diff] [blame] | 932 | } |
| 933 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 934 | static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, |
| 935 | struct kvm_mmu_page *sp) |
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 936 | { |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 937 | struct kvm_memslots *slots; |
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 938 | struct kvm_memory_slot *slot; |
| 939 | |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 940 | slots = kvm_memslots_for_spte_role(kvm, sp->role); |
| 941 | slot = __gfn_to_memslot(slots, gfn); |
Paolo Bonzini | e4cd1da | 2015-05-18 15:11:46 +0200 | [diff] [blame] | 942 | return __gfn_to_rmap(gfn, sp->role.level, slot); |
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 943 | } |
| 944 | |
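| | /*
| |  * Adding an rmap entry may require a new pte_list_desc; report whether the
| |  * per-vCPU cache still has a preallocated object to back such an extension.
| |  */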
Xiao Guangrong | f759e2b | 2011-09-22 16:53:17 +0800 | [diff] [blame] | 945 | static bool rmap_can_add(struct kvm_vcpu *vcpu) |
| 946 | { |
Sean Christopherson | 356ec69 | 2020-07-02 19:35:27 -0700 | [diff] [blame] | 947 | struct kvm_mmu_memory_cache *mc; |
Xiao Guangrong | f759e2b | 2011-09-22 16:53:17 +0800 | [diff] [blame] | 948 | |
Sean Christopherson | 356ec69 | 2020-07-02 19:35:27 -0700 | [diff] [blame] | 949 | mc = &vcpu->arch.mmu_pte_list_desc_cache; |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 950 | return kvm_mmu_memory_cache_nr_free_objects(mc); |
Xiao Guangrong | f759e2b | 2011-09-22 16:53:17 +0800 | [diff] [blame] | 951 | } |
| 952 | |
Joerg Roedel | 44ad994 | 2009-07-27 16:30:42 +0200 | [diff] [blame] | 953 | static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 954 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 955 | struct kvm_mmu_page *sp; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 956 | struct kvm_rmap_head *rmap_head; |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 957 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 958 | sp = sptep_to_sp(spte); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 959 | kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 960 | rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); |
| 961 | return pte_list_add(vcpu, spte, rmap_head); |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 962 | } |
| 963 | |
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 964 | static void rmap_remove(struct kvm *kvm, u64 *spte) |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 965 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 966 | struct kvm_mmu_page *sp; |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 967 | gfn_t gfn; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 968 | struct kvm_rmap_head *rmap_head; |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 969 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 970 | sp = sptep_to_sp(spte); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 971 | gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 972 | rmap_head = gfn_to_rmap(kvm, gfn, sp); |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 973 | __pte_list_remove(spte, rmap_head); |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 974 | } |
| 975 | |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 976 | /* |
| 977 | * Used by the following functions to iterate through the sptes linked by a |
| 978 | * rmap. All fields are private and not meant to be used outside.
| 979 | */ |
| 980 | struct rmap_iterator { |
| 981 | /* private fields */ |
| 982 | struct pte_list_desc *desc; /* holds the sptep if not NULL */ |
| 983 | int pos; /* index of the sptep */ |
| 984 | }; |
| 985 | |
| 986 | /* |
| 987 | * Iteration must be started by this function. This should also be used after |
| 988 | * removing/dropping sptes from the rmap link because in such cases the |
Miaohe Lin | 0a03cbd | 2019-12-06 16:20:18 +0800 | [diff] [blame] | 989 | * information in the iterator may not be valid. |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 990 | * |
| 991 | * Returns sptep if found, NULL otherwise. |
| 992 | */ |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 993 | static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head, |
| 994 | struct rmap_iterator *iter) |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 995 | { |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 996 | u64 *sptep; |
| 997 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 998 | if (!rmap_head->val) |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 999 | return NULL; |
| 1000 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1001 | if (!(rmap_head->val & 1)) { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1002 | iter->desc = NULL; |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1003 | sptep = (u64 *)rmap_head->val; |
| 1004 | goto out; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1005 | } |
| 1006 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1007 | iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1008 | iter->pos = 0; |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1009 | sptep = iter->desc->sptes[iter->pos]; |
| 1010 | out: |
| 1011 | BUG_ON(!is_shadow_present_pte(*sptep)); |
| 1012 | return sptep; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1013 | } |
| 1014 | |
| 1015 | /* |
| 1016 | * Must be used with a valid iterator: e.g. after rmap_get_first(). |
| 1017 | * |
| 1018 | * Returns sptep if found, NULL otherwise. |
| 1019 | */ |
| 1020 | static u64 *rmap_get_next(struct rmap_iterator *iter) |
| 1021 | { |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1022 | u64 *sptep; |
| 1023 | |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1024 | if (iter->desc) { |
| 1025 | if (iter->pos < PTE_LIST_EXT - 1) { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1026 | ++iter->pos; |
| 1027 | sptep = iter->desc->sptes[iter->pos]; |
| 1028 | if (sptep) |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1029 | goto out; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1030 | } |
| 1031 | |
| 1032 | iter->desc = iter->desc->more; |
| 1033 | |
| 1034 | if (iter->desc) { |
| 1035 | iter->pos = 0; |
| 1036 | /* desc->sptes[0] cannot be NULL */ |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1037 | sptep = iter->desc->sptes[iter->pos]; |
| 1038 | goto out; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1039 | } |
| 1040 | } |
| 1041 | |
| 1042 | return NULL; |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1043 | out: |
| 1044 | BUG_ON(!is_shadow_present_pte(*sptep)); |
| 1045 | return sptep; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1046 | } |
| 1047 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1048 | #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \ |
| 1049 | for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \ |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1050 | _spte_; _spte_ = rmap_get_next(_iter_)) |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1051 | |
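| | /* Clear the spte and, if it was present, drop its rmap entry as well. */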
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 1052 | static void drop_spte(struct kvm *kvm, u64 *sptep) |
Xiao Guangrong | e4b502e | 2010-07-16 11:28:09 +0800 | [diff] [blame] | 1053 | { |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 1054 | if (mmu_spte_clear_track_bits(sptep)) |
Marcelo Tosatti | eb45fda | 2010-10-25 11:58:22 -0200 | [diff] [blame] | 1055 | rmap_remove(kvm, sptep); |
Avi Kivity | be38d27 | 2010-06-06 14:31:27 +0300 | [diff] [blame] | 1056 | } |
| 1057 | |
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1058 | |
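| | /*
| |  * If @sptep maps a huge page, zap it and return true; the caller is
| |  * expected to flush the TLBs for the covered range (see drop_large_spte()).
| |  */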
| 1059 | static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) |
| 1060 | { |
| 1061 | if (is_large_pte(*sptep)) { |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 1062 | WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K); |
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1063 | drop_spte(kvm, sptep); |
| 1064 | --kvm->stat.lpages; |
| 1065 | return true; |
| 1066 | } |
| 1067 | |
| 1068 | return false; |
| 1069 | } |
| 1070 | |
| 1071 | static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) |
| 1072 | { |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 1073 | if (__drop_large_spte(vcpu->kvm, sptep)) { |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 1074 | struct kvm_mmu_page *sp = sptep_to_sp(sptep); |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 1075 | |
| 1076 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, |
| 1077 | KVM_PAGES_PER_HPAGE(sp->role.level)); |
| 1078 | } |
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1079 | } |
| 1080 | |
| 1081 | /* |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1082 | * Write-protect the specified @sptep; @pt_protect indicates whether the
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 1083 | * spte write-protection is caused by protecting a shadow page table.
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1084 | *
Tiejun Chen | b461966 | 2014-09-22 10:31:38 +0800 | [diff] [blame] | 1085 | * Note: write protection differs between dirty logging and spte
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1086 | * protection:
| 1087 | * - for dirty logging, the spte can be set to writable at any time if
| 1088 | *   its dirty bitmap is properly set.
| 1089 | * - for spte protection, the spte can be made writable only after the
| 1090 | *   shadow page is unsync-ed.
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1091 | *
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 1092 | * Return true if the TLB needs to be flushed.
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1093 | */ |
Bandan Das | c4f138b | 2016-08-02 16:32:37 -0400 | [diff] [blame] | 1094 | static bool spte_write_protect(u64 *sptep, bool pt_protect) |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1095 | { |
| 1096 | u64 spte = *sptep; |
| 1097 | |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1098 | if (!is_writable_pte(spte) && |
Junaid Shahid | ea4114b | 2016-12-06 16:46:11 -0800 | [diff] [blame] | 1099 | !(pt_protect && spte_can_locklessly_be_made_writable(spte))) |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1100 | return false; |
| 1101 | |
Stephen Zhang | 805a0f8 | 2021-01-27 10:08:45 +0800 | [diff] [blame] | 1102 | rmap_printk("spte %p %llx\n", sptep, *sptep); |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1103 | |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1104 | if (pt_protect) |
Sean Christopherson | 5fc3424 | 2021-02-25 12:47:43 -0800 | [diff] [blame] | 1105 | spte &= ~shadow_mmu_writable_mask; |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1106 | spte = spte & ~PT_WRITABLE_MASK; |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1107 | |
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 1108 | return mmu_spte_update(sptep, spte); |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1109 | } |
| 1110 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1111 | static bool __rmap_write_protect(struct kvm *kvm, |
| 1112 | struct kvm_rmap_head *rmap_head, |
Takuya Yoshikawa | 245c391 | 2013-01-08 19:44:09 +0900 | [diff] [blame] | 1113 | bool pt_protect) |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1114 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1115 | u64 *sptep; |
| 1116 | struct rmap_iterator iter; |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1117 | bool flush = false; |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1118 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1119 | for_each_rmap_spte(rmap_head, &iter, sptep) |
Bandan Das | c4f138b | 2016-08-02 16:32:37 -0400 | [diff] [blame] | 1120 | flush |= spte_write_protect(sptep, pt_protect); |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1121 | |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1122 | return flush; |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1123 | } |
| 1124 | |
Bandan Das | c4f138b | 2016-08-02 16:32:37 -0400 | [diff] [blame] | 1125 | static bool spte_clear_dirty(u64 *sptep) |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1126 | { |
| 1127 | u64 spte = *sptep; |
| 1128 | |
Stephen Zhang | 805a0f8 | 2021-01-27 10:08:45 +0800 | [diff] [blame] | 1129 | rmap_printk("spte %p %llx\n", sptep, *sptep); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1130 | |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1131 | MMU_WARN_ON(!spte_ad_enabled(spte)); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1132 | spte &= ~shadow_dirty_mask; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1133 | return mmu_spte_update(sptep, spte); |
| 1134 | } |
| 1135 | |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1136 | static bool spte_wrprot_for_clear_dirty(u64 *sptep) |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1137 | { |
| 1138 | bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT, |
| 1139 | (unsigned long *)sptep); |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1140 | if (was_writable && !spte_ad_enabled(*sptep)) |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1141 | kvm_set_pfn_dirty(spte_to_pfn(*sptep)); |
| 1142 | |
| 1143 | return was_writable; |
| 1144 | } |
| 1145 | |
| 1146 | /* |
| 1147 | * Gets the GFN ready for another round of dirty logging by clearing the |
| 1148 | * - D bit on ad-enabled SPTEs, and |
| 1149 | * - W bit on ad-disabled SPTEs. |
| 1150 | * Returns true iff any D or W bits were cleared. |
| 1151 | */ |
Sean Christopherson | 0a234f5 | 2021-02-12 16:50:05 -0800 | [diff] [blame] | 1152 | static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
| 1153 | struct kvm_memory_slot *slot) |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1154 | { |
| 1155 | u64 *sptep; |
| 1156 | struct rmap_iterator iter; |
| 1157 | bool flush = false; |
| 1158 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1159 | for_each_rmap_spte(rmap_head, &iter, sptep) |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1160 | if (spte_ad_need_write_protect(*sptep)) |
| 1161 | flush |= spte_wrprot_for_clear_dirty(sptep); |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1162 | else |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1163 | flush |= spte_clear_dirty(sptep); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1164 | |
| 1165 | return flush; |
| 1166 | } |
| 1167 | |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1168 | /** |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1169 | * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1170 | * @kvm: kvm instance |
| 1171 | * @slot: slot to protect |
| 1172 | * @gfn_offset: start of the BITS_PER_LONG pages we care about |
| 1173 | * @mask: indicates which pages we should protect |
| 1174 | * |
Keqian Zhu | 8921291 | 2021-04-29 11:41:15 +0800 | [diff] [blame] | 1175 | * Used when we do not need to care about huge page mappings. |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1176 | */ |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1177 | static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1178 | struct kvm_memory_slot *slot, |
| 1179 | gfn_t gfn_offset, unsigned long mask) |
Izik Eidus | 98348e9 | 2007-10-16 14:42:30 +0200 | [diff] [blame] | 1180 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1181 | struct kvm_rmap_head *rmap_head; |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1182 | |
Paolo Bonzini | 897218f | 2021-02-06 09:53:33 -0500 | [diff] [blame] | 1183 | if (is_tdp_mmu_enabled(kvm)) |
Ben Gardon | a6a0b05 | 2020-10-14 11:26:55 -0700 | [diff] [blame] | 1184 | kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, |
| 1185 | slot->base_gfn + gfn_offset, mask, true); |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 1186 | |
| 1187 | if (!kvm_memslots_have_rmaps(kvm)) |
| 1188 | return; |
| 1189 | |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1190 | while (mask) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1191 | rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1192 | PG_LEVEL_4K, slot); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1193 | __rmap_write_protect(kvm, rmap_head, false); |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1194 | |
| 1195 | /* clear the first set bit */ |
| 1196 | mask &= mask - 1; |
| 1197 | } |
| 1198 | } |
| 1199 | |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1200 | /** |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1201 | * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write |
| 1202 | * protect the page if the D-bit isn't supported. |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1203 | * @kvm: kvm instance |
| 1204 | * @slot: slot to clear D-bit |
| 1205 | * @gfn_offset: start of the BITS_PER_LONG pages we care about |
| 1206 | * @mask: indicates which pages' D-bits we should clear
| 1207 | * |
| 1208 | * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap. |
| 1209 | */ |
Sean Christopherson | a018eba | 2021-02-12 16:50:10 -0800 | [diff] [blame] | 1210 | static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, |
| 1211 | struct kvm_memory_slot *slot, |
| 1212 | gfn_t gfn_offset, unsigned long mask) |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1213 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1214 | struct kvm_rmap_head *rmap_head; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1215 | |
Paolo Bonzini | 897218f | 2021-02-06 09:53:33 -0500 | [diff] [blame] | 1216 | if (is_tdp_mmu_enabled(kvm)) |
Ben Gardon | a6a0b05 | 2020-10-14 11:26:55 -0700 | [diff] [blame] | 1217 | kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, |
| 1218 | slot->base_gfn + gfn_offset, mask, false); |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 1219 | |
| 1220 | if (!kvm_memslots_have_rmaps(kvm)) |
| 1221 | return; |
| 1222 | |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1223 | while (mask) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1224 | rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1225 | PG_LEVEL_4K, slot); |
Sean Christopherson | 0a234f5 | 2021-02-12 16:50:05 -0800 | [diff] [blame] | 1226 | __rmap_clear_dirty(kvm, rmap_head, slot); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1227 | |
| 1228 | /* clear the first set bit */ |
| 1229 | mask &= mask - 1; |
| 1230 | } |
| 1231 | } |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1232 | |
| 1233 | /** |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1234 | * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected |
| 1235 | * PT level pages. |
| 1236 | * |
| 1237 | * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to |
| 1238 | * enable dirty logging for them. |
| 1239 | * |
Keqian Zhu | 8921291 | 2021-04-29 11:41:15 +0800 | [diff] [blame] | 1240 | * We need to care about huge page mappings: e.g. during dirty logging we may |
| 1241 | * have such mappings. |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1242 | */ |
| 1243 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, |
| 1244 | struct kvm_memory_slot *slot, |
| 1245 | gfn_t gfn_offset, unsigned long mask) |
| 1246 | { |
Keqian Zhu | 8921291 | 2021-04-29 11:41:15 +0800 | [diff] [blame] | 1247 | /* |
| 1248 | * Huge pages are NOT write protected when we start dirty logging in |
| 1249 | * initially-all-set mode; must write protect them here so that they |
| 1250 | * are split to 4K on the first write. |
| 1251 | * |
| 1252 | * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn |
| 1253 | * of memslot has no such restriction, so the range can cross two large |
| 1254 | * pages. |
| 1255 | */ |
| 1256 | if (kvm_dirty_log_manual_protect_and_init_set(kvm)) { |
| 1257 | gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask); |
| 1258 | gfn_t end = slot->base_gfn + gfn_offset + __fls(mask); |
| 1259 | |
| 1260 | kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M); |
| 1261 | |
| 1262 | /* Cross two large pages? */ |
| 1263 | if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) != |
| 1264 | ALIGN(end << PAGE_SHIFT, PMD_SIZE)) |
| 1265 | kvm_mmu_slot_gfn_write_protect(kvm, slot, end, |
| 1266 | PG_LEVEL_2M); |
| 1267 | } |
| 1268 | |
| 1269 | /* Now handle 4K PTEs. */ |
Sean Christopherson | a018eba | 2021-02-12 16:50:10 -0800 | [diff] [blame] | 1270 | if (kvm_x86_ops.cpu_dirty_log_size) |
| 1271 | kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask); |
Kai Huang | 88178fd | 2015-01-28 10:54:27 +0800 | [diff] [blame] | 1272 | else |
| 1273 | kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1274 | } |
| 1275 | |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 1276 | int kvm_cpu_dirty_log_size(void) |
| 1277 | { |
Sean Christopherson | 6dd0380 | 2021-02-12 16:50:09 -0800 | [diff] [blame] | 1278 | return kvm_x86_ops.cpu_dirty_log_size; |
Peter Xu | fb04a1e | 2020-09-30 21:22:22 -0400 | [diff] [blame] | 1279 | } |
| 1280 | |
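| | /*
| |  * Write-protect all sptes that map @gfn at @min_level or above, in both the
| |  * rmap-based shadow MMU and the TDP MMU; returns true if any spte changed.
| |  */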
Xiao Guangrong | aeecee2 | 2016-02-24 17:51:08 +0800 | [diff] [blame] | 1281 | bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, |
Keqian Zhu | 3ad9356 | 2021-04-29 11:41:14 +0800 | [diff] [blame] | 1282 | struct kvm_memory_slot *slot, u64 gfn, |
| 1283 | int min_level) |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1284 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1285 | struct kvm_rmap_head *rmap_head; |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1286 | int i; |
Xiao Guangrong | 2f84569 | 2012-06-20 15:56:53 +0800 | [diff] [blame] | 1287 | bool write_protected = false; |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1288 | |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 1289 | if (kvm_memslots_have_rmaps(kvm)) { |
| 1290 | for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) { |
| 1291 | rmap_head = __gfn_to_rmap(gfn, i, slot); |
| 1292 | write_protected |= __rmap_write_protect(kvm, rmap_head, true); |
| 1293 | } |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1294 | } |
| 1295 | |
Paolo Bonzini | 897218f | 2021-02-06 09:53:33 -0500 | [diff] [blame] | 1296 | if (is_tdp_mmu_enabled(kvm)) |
Ben Gardon | 46044f7 | 2020-10-14 11:26:57 -0700 | [diff] [blame] | 1297 | write_protected |= |
Keqian Zhu | 3ad9356 | 2021-04-29 11:41:14 +0800 | [diff] [blame] | 1298 | kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level); |
Ben Gardon | 46044f7 | 2020-10-14 11:26:57 -0700 | [diff] [blame] | 1299 | |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 1300 | return write_protected; |
Avi Kivity | 374cbac | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1301 | } |
| 1302 | |
Xiao Guangrong | aeecee2 | 2016-02-24 17:51:08 +0800 | [diff] [blame] | 1303 | static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) |
| 1304 | { |
| 1305 | struct kvm_memory_slot *slot; |
| 1306 | |
| 1307 | slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
Keqian Zhu | 3ad9356 | 2021-04-29 11:41:14 +0800 | [diff] [blame] | 1308 | return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K); |
Xiao Guangrong | aeecee2 | 2016-02-24 17:51:08 +0800 | [diff] [blame] | 1309 | } |
| 1310 | |
Sean Christopherson | 0a234f5 | 2021-02-12 16:50:05 -0800 | [diff] [blame] | 1311 | static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
| 1312 | struct kvm_memory_slot *slot) |
Xiao Guangrong | 6a49f85 | 2015-05-13 14:42:25 +0800 | [diff] [blame] | 1313 | { |
| 1314 | u64 *sptep; |
| 1315 | struct rmap_iterator iter; |
| 1316 | bool flush = false; |
| 1317 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1318 | while ((sptep = rmap_get_first(rmap_head, &iter))) { |
Stephen Zhang | 805a0f8 | 2021-01-27 10:08:45 +0800 | [diff] [blame] | 1319 | rmap_printk("spte %p %llx.\n", sptep, *sptep); |
Xiao Guangrong | 6a49f85 | 2015-05-13 14:42:25 +0800 | [diff] [blame] | 1320 | |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 1321 | pte_list_remove(rmap_head, sptep); |
Xiao Guangrong | 6a49f85 | 2015-05-13 14:42:25 +0800 | [diff] [blame] | 1322 | flush = true; |
| 1323 | } |
| 1324 | |
| 1325 | return flush; |
| 1326 | } |
| 1327 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1328 | static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
| 1329 | struct kvm_memory_slot *slot, gfn_t gfn, int level, |
| 1330 | pte_t unused) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1331 | { |
Sean Christopherson | 0a234f5 | 2021-02-12 16:50:05 -0800 | [diff] [blame] | 1332 | return kvm_zap_rmapp(kvm, rmap_head, slot); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1333 | } |
| 1334 | |
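| | /*
| |  * Handler for the change_pte path: if the new host PTE is writable, the
| |  * spte is simply zapped and re-established by a later fault; otherwise the
| |  * spte is rewritten in place to point at the new pfn.
| |  */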
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1335 | static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
| 1336 | struct kvm_memory_slot *slot, gfn_t gfn, int level, |
| 1337 | pte_t pte) |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1338 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1339 | u64 *sptep; |
| 1340 | struct rmap_iterator iter; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1341 | int need_flush = 0; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1342 | u64 new_spte; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 1343 | kvm_pfn_t new_pfn; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1344 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1345 | WARN_ON(pte_huge(pte)); |
| 1346 | new_pfn = pte_pfn(pte); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1347 | |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1348 | restart: |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1349 | for_each_rmap_spte(rmap_head, &iter, sptep) { |
Stephen Zhang | 805a0f8 | 2021-01-27 10:08:45 +0800 | [diff] [blame] | 1350 | rmap_printk("spte %p %llx gfn %llx (%d)\n", |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1351 | sptep, *sptep, gfn, level); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1352 | |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1353 | need_flush = 1; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1354 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1355 | if (pte_write(pte)) { |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 1356 | pte_list_remove(rmap_head, sptep); |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1357 | goto restart; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1358 | } else { |
Paolo Bonzini | cb3eeda | 2020-09-28 10:17:17 -0400 | [diff] [blame] | 1359 | new_spte = kvm_mmu_changed_pte_notifier_make_spte( |
| 1360 | *sptep, new_pfn); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1361 | |
| 1362 | mmu_spte_clear_track_bits(sptep); |
| 1363 | mmu_spte_set(sptep, new_spte); |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1364 | } |
| 1365 | } |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1366 | |
Lan Tianyu | 3cc5ea9 | 2018-12-06 21:21:12 +0800 | [diff] [blame] | 1367 | if (need_flush && kvm_available_flush_tlb_with_range()) { |
| 1368 | kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); |
| 1369 | return 0; |
| 1370 | } |
| 1371 | |
Lan Tianyu | 0cf853c | 2018-12-06 21:21:11 +0800 | [diff] [blame] | 1372 | return need_flush; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1373 | } |
| 1374 | |
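| | /*
| |  * Iterator that visits every rmap head covering [start_gfn, end_gfn] in a
| |  * memslot, walking one page level at a time from start_level to end_level.
| |  */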
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1375 | struct slot_rmap_walk_iterator { |
| 1376 | /* input fields. */ |
| 1377 | struct kvm_memory_slot *slot; |
| 1378 | gfn_t start_gfn; |
| 1379 | gfn_t end_gfn; |
| 1380 | int start_level; |
| 1381 | int end_level; |
| 1382 | |
| 1383 | /* output fields. */ |
| 1384 | gfn_t gfn; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1385 | struct kvm_rmap_head *rmap; |
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1386 | int level; |
| 1387 | |
| 1388 | /* private field. */ |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1389 | struct kvm_rmap_head *end_rmap; |
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1390 | }; |
| 1391 | |
| 1392 | static void |
| 1393 | rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level) |
| 1394 | { |
| 1395 | iterator->level = level; |
| 1396 | iterator->gfn = iterator->start_gfn; |
| 1397 | iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot); |
| 1398 | iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level, |
| 1399 | iterator->slot); |
| 1400 | } |
| 1401 | |
| 1402 | static void |
| 1403 | slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator, |
| 1404 | struct kvm_memory_slot *slot, int start_level, |
| 1405 | int end_level, gfn_t start_gfn, gfn_t end_gfn) |
| 1406 | { |
| 1407 | iterator->slot = slot; |
| 1408 | iterator->start_level = start_level; |
| 1409 | iterator->end_level = end_level; |
| 1410 | iterator->start_gfn = start_gfn; |
| 1411 | iterator->end_gfn = end_gfn; |
| 1412 | |
| 1413 | rmap_walk_init_level(iterator, iterator->start_level); |
| 1414 | } |
| 1415 | |
| 1416 | static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator) |
| 1417 | { |
| 1418 | return !!iterator->rmap; |
| 1419 | } |
| 1420 | |
| 1421 | static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) |
| 1422 | { |
| 1423 | if (++iterator->rmap <= iterator->end_rmap) { |
| 1424 | iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level)); |
| 1425 | return; |
| 1426 | } |
| 1427 | |
| 1428 | if (++iterator->level > iterator->end_level) { |
| 1429 | iterator->rmap = NULL; |
| 1430 | return; |
| 1431 | } |
| 1432 | |
| 1433 | rmap_walk_init_level(iterator, iterator->level); |
| 1434 | } |
| 1435 | |
| 1436 | #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \ |
| 1437 | _start_gfn, _end_gfn, _iter_) \ |
| 1438 | for (slot_rmap_walk_init(_iter_, _slot_, _start_level_, \ |
| 1439 | _end_level_, _start_gfn, _end_gfn); \ |
| 1440 | slot_rmap_walk_okay(_iter_); \ |
| 1441 | slot_rmap_walk_next(_iter_)) |
| 1442 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1443 | typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
| 1444 | struct kvm_memory_slot *slot, gfn_t gfn, |
| 1445 | int level, pte_t pte); |
Sean Christopherson | c1b9149 | 2021-02-25 17:03:28 -0800 | [diff] [blame] | 1446 | |
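| | /*
| |  * Invoke @handler on every rmap head that intersects the gfn range, across
| |  * all supported page levels, and OR the results together (a true result
| |  * generally means the caller must flush TLBs).
| |  */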
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1447 | static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, |
| 1448 | struct kvm_gfn_range *range, |
| 1449 | rmap_handler_t handler) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1450 | { |
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1451 | struct slot_rmap_walk_iterator iterator; |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1452 | bool ret = false; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1453 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1454 | for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, |
| 1455 | range->start, range->end - 1, &iterator) |
| 1456 | ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn, |
| 1457 | iterator.level, range->pte); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1458 | |
Takuya Yoshikawa | f395302 | 2012-07-02 17:58:48 +0900 | [diff] [blame] | 1459 | return ret; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1460 | } |
| 1461 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1462 | bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) |
Takuya Yoshikawa | 84504ef | 2012-07-02 17:55:48 +0900 | [diff] [blame] | 1463 | { |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 1464 | bool flush = false; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1465 | |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 1466 | if (kvm_memslots_have_rmaps(kvm)) |
| 1467 | flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp); |
Ben Gardon | 063afac | 2020-10-14 11:26:52 -0700 | [diff] [blame] | 1468 | |
Paolo Bonzini | 897218f | 2021-02-06 09:53:33 -0500 | [diff] [blame] | 1469 | if (is_tdp_mmu_enabled(kvm)) |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1470 | flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush); |
Ben Gardon | 063afac | 2020-10-14 11:26:52 -0700 | [diff] [blame] | 1471 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1472 | return flush; |
Takuya Yoshikawa | b3ae209 | 2012-07-02 17:56:33 +0900 | [diff] [blame] | 1473 | } |
| 1474 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1475 | bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1476 | { |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 1477 | bool flush = false; |
Ben Gardon | 1d8dd6b | 2020-10-14 11:26:54 -0700 | [diff] [blame] | 1478 | |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 1479 | if (kvm_memslots_have_rmaps(kvm)) |
| 1480 | flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp); |
Ben Gardon | 1d8dd6b | 2020-10-14 11:26:54 -0700 | [diff] [blame] | 1481 | |
Paolo Bonzini | 897218f | 2021-02-06 09:53:33 -0500 | [diff] [blame] | 1482 | if (is_tdp_mmu_enabled(kvm)) |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1483 | flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range); |
Ben Gardon | 1d8dd6b | 2020-10-14 11:26:54 -0700 | [diff] [blame] | 1484 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1485 | return flush; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1486 | } |
| 1487 | |
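| | /*
| |  * Age the sptes in the rmap: clear (or emulate clearing) the accessed bit
| |  * and report whether any spte had been accessed since the last pass.
| |  */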
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1488 | static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
| 1489 | struct kvm_memory_slot *slot, gfn_t gfn, int level, |
| 1490 | pte_t unused) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1491 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1492 | u64 *sptep; |
Kees Cook | 3f649ab | 2020-06-03 13:09:38 -0700 | [diff] [blame] | 1493 | struct rmap_iterator iter; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1494 | int young = 0; |
| 1495 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1496 | for_each_rmap_spte(rmap_head, &iter, sptep) |
| 1497 | young |= mmu_spte_age(sptep); |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1498 | |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1499 | return young; |
| 1500 | } |
| 1501 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1502 | static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
| 1503 | struct kvm_memory_slot *slot, gfn_t gfn, |
| 1504 | int level, pte_t unused) |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1505 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1506 | u64 *sptep; |
| 1507 | struct rmap_iterator iter; |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1508 | |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 1509 | for_each_rmap_spte(rmap_head, &iter, sptep) |
| 1510 | if (is_accessed_spte(*sptep)) |
| 1511 | return 1; |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 1512 | return 0; |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1513 | } |
| 1514 | |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 1515 | #define RMAP_RECYCLE_THRESHOLD 1000 |
| 1516 | |
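| | /*
| |  * Zap every rmap entry for @gfn at this shadow page's level and flush;
| |  * used to cap rmap chains that grow past RMAP_RECYCLE_THRESHOLD.
| |  */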
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 1517 | static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 1518 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1519 | struct kvm_rmap_head *rmap_head; |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 1520 | struct kvm_mmu_page *sp; |
| 1521 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 1522 | sp = sptep_to_sp(spte); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 1523 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1524 | rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 1525 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1526 | kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0)); |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 1527 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, |
| 1528 | KVM_PAGES_PER_HPAGE(sp->role.level)); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 1529 | } |
| 1530 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1531 | bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1532 | { |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 1533 | bool young = false; |
Ben Gardon | f8e1449 | 2020-10-14 11:26:53 -0700 | [diff] [blame] | 1534 | |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 1535 | if (kvm_memslots_have_rmaps(kvm)) |
| 1536 | young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp); |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1537 | |
Paolo Bonzini | 897218f | 2021-02-06 09:53:33 -0500 | [diff] [blame] | 1538 | if (is_tdp_mmu_enabled(kvm)) |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1539 | young |= kvm_tdp_mmu_age_gfn_range(kvm, range); |
Ben Gardon | f8e1449 | 2020-10-14 11:26:53 -0700 | [diff] [blame] | 1540 | |
| 1541 | return young; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1542 | } |
| 1543 | |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1544 | bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1545 | { |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 1546 | bool young = false; |
Ben Gardon | f8e1449 | 2020-10-14 11:26:53 -0700 | [diff] [blame] | 1547 | |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 1548 | if (kvm_memslots_have_rmaps(kvm)) |
| 1549 | young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp); |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1550 | |
Paolo Bonzini | 897218f | 2021-02-06 09:53:33 -0500 | [diff] [blame] | 1551 | if (is_tdp_mmu_enabled(kvm)) |
Sean Christopherson | 3039bcc | 2021-04-01 17:56:50 -0700 | [diff] [blame] | 1552 | young |= kvm_tdp_mmu_test_age_gfn(kvm, range); |
Ben Gardon | f8e1449 | 2020-10-14 11:26:53 -0700 | [diff] [blame] | 1553 | |
| 1554 | return young; |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1555 | } |
| 1556 | |
Yaozu Dong | d6c69ee | 2007-04-25 14:17:25 +0800 | [diff] [blame] | 1557 | #ifdef MMU_DEBUG |
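| | /* Debug-only check that a shadow page holds no present sptes before it is freed. */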
Avi Kivity | 47ad8e6 | 2007-05-06 15:50:58 +0300 | [diff] [blame] | 1558 | static int is_empty_shadow_page(u64 *spt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1559 | { |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 1560 | u64 *pos; |
| 1561 | u64 *end; |
| 1562 | |
Avi Kivity | 47ad8e6 | 2007-05-06 15:50:58 +0300 | [diff] [blame] | 1563 | for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) |
Avi Kivity | 3c91551 | 2008-05-20 16:21:13 +0300 | [diff] [blame] | 1564 | if (is_shadow_present_pte(*pos)) { |
Harvey Harrison | b8688d5 | 2008-03-03 12:59:56 -0800 | [diff] [blame] | 1565 | printk(KERN_ERR "%s: %p %llx\n", __func__, |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 1566 | pos, *pos); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1567 | return 0; |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 1568 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1569 | return 1; |
| 1570 | } |
Yaozu Dong | d6c69ee | 2007-04-25 14:17:25 +0800 | [diff] [blame] | 1571 | #endif |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1572 | |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 1573 | /* |
| 1574 | * This value is the sum of all of the kvm instances'
| 1575 | * kvm->arch.n_used_mmu_pages values. We need a global, |
| 1576 | * aggregate version in order to make the slab shrinker |
| 1577 | * faster.
| 1578 | */ |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 1579 | static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr) |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 1580 | { |
| 1581 | kvm->arch.n_used_mmu_pages += nr; |
| 1582 | percpu_counter_add(&kvm_total_used_mmu_pages, nr); |
| 1583 | } |
| 1584 | |
Gleb Natapov | 834be0d | 2013-01-30 16:45:05 +0200 | [diff] [blame] | 1585 | static void kvm_mmu_free_page(struct kvm_mmu_page *sp) |
Avi Kivity | 260746c | 2007-01-05 16:36:49 -0800 | [diff] [blame] | 1586 | { |
Paolo Bonzini | fa4a2c0 | 2013-10-02 16:56:16 +0200 | [diff] [blame] | 1587 | MMU_WARN_ON(!is_empty_shadow_page(sp->spt)); |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1588 | hlist_del(&sp->hash_link); |
Xiao Guangrong | bd4c86e | 2011-07-12 03:27:14 +0800 | [diff] [blame] | 1589 | list_del(&sp->link); |
| 1590 | free_page((unsigned long)sp->spt); |
Gleb Natapov | 834be0d | 2013-01-30 16:45:05 +0200 | [diff] [blame] | 1591 | if (!sp->role.direct) |
| 1592 | free_page((unsigned long)sp->gfns); |
Xiao Guangrong | e8ad9a7 | 2010-05-13 10:06:02 +0800 | [diff] [blame] | 1593 | kmem_cache_free(mmu_page_header_cache, sp); |
Avi Kivity | 260746c | 2007-01-05 16:36:49 -0800 | [diff] [blame] | 1594 | } |
| 1595 | |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1596 | static unsigned kvm_page_table_hashfn(gfn_t gfn) |
| 1597 | { |
David Matlack | 114df30 | 2016-12-19 13:58:25 -0800 | [diff] [blame] | 1598 | return hash_64(gfn, KVM_MMU_HASH_SHIFT); |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1599 | } |
| 1600 | |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1601 | static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, |
| 1602 | struct kvm_mmu_page *sp, u64 *parent_pte) |
| 1603 | { |
| 1604 | if (!parent_pte) |
| 1605 | return; |
| 1606 | |
| 1607 | pte_list_add(vcpu, parent_pte, &sp->parent_ptes); |
| 1608 | } |
| 1609 | |
| 1610 | static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, |
| 1611 | u64 *parent_pte) |
| 1612 | { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1613 | __pte_list_remove(parent_pte, &sp->parent_ptes); |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1614 | } |
| 1615 | |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 1616 | static void drop_parent_pte(struct kvm_mmu_page *sp, |
| 1617 | u64 *parent_pte) |
| 1618 | { |
| 1619 | mmu_page_remove_parent_pte(sp, parent_pte); |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 1620 | mmu_spte_clear_no_track(parent_pte); |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 1621 | } |
| 1622 | |
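| | /*
| |  * Allocate a shadow page from the per-vCPU caches; indirect (!direct)
| |  * pages also get a gfns array tracking the guest gfn behind each spte.
| |  */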
Takuya Yoshikawa | 4700579 | 2015-11-20 17:46:29 +0900 | [diff] [blame] | 1623 | static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1624 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1625 | struct kvm_mmu_page *sp; |
Takuya Yoshikawa | 7ddca7e | 2013-03-21 19:33:43 +0900 | [diff] [blame] | 1626 | |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 1627 | sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); |
| 1628 | sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1629 | if (!direct) |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 1630 | sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1631 | set_page_private(virt_to_page(sp->spt), (unsigned long)sp); |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 1632 | |
| 1633 | /* |
| 1634 | * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages() |
| 1635 | * depends on valid pages being added to the head of the list. See |
| 1636 | * comments in kvm_zap_obsolete_pages(). |
| 1637 | */ |
Sean Christopherson | ca333ad | 2019-09-12 19:46:11 -0700 | [diff] [blame] | 1638 | sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; |
Zhang Xiantao | f05e70a | 2007-12-14 10:01:48 +0800 | [diff] [blame] | 1639 | list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 1640 | kvm_mod_used_mmu_pages(vcpu->kvm, +1); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1641 | return sp; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1642 | } |
| 1643 | |
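| | /*
| |  * Propagate "this page has unsync children" up through every parent spte
| |  * via unsync_child_bitmap so that unsync pages can be found from the root.
| |  */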
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1644 | static void mark_unsync(u64 *spte); |
Xiao Guangrong | 6b18493 | 2010-04-16 21:29:17 +0800 | [diff] [blame] | 1645 | static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) |
Marcelo Tosatti | 0074ff6 | 2008-09-23 13:18:40 -0300 | [diff] [blame] | 1646 | { |
Takuya Yoshikawa | 74c4e63 | 2015-11-26 21:15:38 +0900 | [diff] [blame] | 1647 | u64 *sptep; |
| 1648 | struct rmap_iterator iter; |
| 1649 | |
| 1650 | for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) { |
| 1651 | mark_unsync(sptep); |
| 1652 | } |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 1653 | } |
| 1654 | |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1655 | static void mark_unsync(u64 *spte) |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 1656 | { |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1657 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 1658 | unsigned int index; |
| 1659 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 1660 | sp = sptep_to_sp(spte); |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 1661 | index = spte - sp->spt; |
| 1662 | if (__test_and_set_bit(index, sp->unsync_child_bitmap)) |
| 1663 | return; |
| 1664 | if (sp->unsync_children++) |
| 1665 | return; |
| 1666 | kvm_mmu_mark_parents_unsync(sp); |
Marcelo Tosatti | 0074ff6 | 2008-09-23 13:18:40 -0300 | [diff] [blame] | 1667 | } |
| 1668 | |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 1669 | static int nonpaging_sync_page(struct kvm_vcpu *vcpu, |
Xiao Guangrong | a4a8e6f | 2010-11-19 17:04:03 +0800 | [diff] [blame] | 1670 | struct kvm_mmu_page *sp) |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 1671 | { |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 1672 | return 0; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 1673 | } |
| 1674 | |
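| | /*
| |  * Fixed-size vector used while walking unsync_child_bitmap to collect
| |  * unsync shadow pages together with the parent index that reaches each one.
| |  */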
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1675 | #define KVM_PAGE_ARRAY_NR 16 |
| 1676 | |
| 1677 | struct kvm_mmu_pages { |
| 1678 | struct mmu_page_and_offset { |
| 1679 | struct kvm_mmu_page *sp; |
| 1680 | unsigned int idx; |
| 1681 | } page[KVM_PAGE_ARRAY_NR]; |
| 1682 | unsigned int nr; |
| 1683 | }; |
| 1684 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 1685 | static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, |
| 1686 | int idx) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1687 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1688 | int i; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1689 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1690 | if (sp->unsync) |
| 1691 | for (i=0; i < pvec->nr; i++) |
| 1692 | if (pvec->page[i].sp == sp) |
| 1693 | return 0; |
| 1694 | |
| 1695 | pvec->page[pvec->nr].sp = sp; |
| 1696 | pvec->page[pvec->nr].idx = idx; |
| 1697 | pvec->nr++; |
| 1698 | return (pvec->nr == KVM_PAGE_ARRAY_NR); |
| 1699 | } |
| 1700 | |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 1701 | static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx) |
| 1702 | { |
| 1703 | --sp->unsync_children; |
| 1704 | WARN_ON((int)sp->unsync_children < 0); |
| 1705 | __clear_bit(idx, sp->unsync_child_bitmap); |
| 1706 | } |
| 1707 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1708 | static int __mmu_unsync_walk(struct kvm_mmu_page *sp, |
| 1709 | struct kvm_mmu_pages *pvec) |
| 1710 | { |
| 1711 | int i, ret, nr_unsync_leaf = 0; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1712 | |
Takuya Yoshikawa | 37178b8 | 2011-11-29 14:02:45 +0900 | [diff] [blame] | 1713 | for_each_set_bit(i, sp->unsync_child_bitmap, 512) { |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 1714 | struct kvm_mmu_page *child; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1715 | u64 ent = sp->spt[i]; |
| 1716 | |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 1717 | if (!is_shadow_present_pte(ent) || is_large_pte(ent)) { |
| 1718 | clear_unsync_child_bit(sp, i); |
| 1719 | continue; |
| 1720 | } |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1721 | |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 1722 | child = to_shadow_page(ent & PT64_BASE_ADDR_MASK); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1723 | |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 1724 | if (child->unsync_children) { |
| 1725 | if (mmu_pages_add(pvec, child, i)) |
| 1726 | return -ENOSPC; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1727 | |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 1728 | ret = __mmu_unsync_walk(child, pvec); |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 1729 | if (!ret) { |
| 1730 | clear_unsync_child_bit(sp, i); |
| 1731 | continue; |
| 1732 | } else if (ret > 0) { |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 1733 | nr_unsync_leaf += ret; |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 1734 | } else |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 1735 | return ret; |
| 1736 | } else if (child->unsync) { |
| 1737 | nr_unsync_leaf++; |
| 1738 | if (mmu_pages_add(pvec, child, i)) |
| 1739 | return -ENOSPC; |
| 1740 | } else |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 1741 | clear_unsync_child_bit(sp, i); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1742 | } |
| 1743 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1744 | return nr_unsync_leaf; |
| 1745 | } |
| 1746 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 1747 | #define INVALID_INDEX (-1) |
| 1748 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1749 | static int mmu_unsync_walk(struct kvm_mmu_page *sp, |
| 1750 | struct kvm_mmu_pages *pvec) |
| 1751 | { |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 1752 | pvec->nr = 0; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1753 | if (!sp->unsync_children) |
| 1754 | return 0; |
| 1755 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 1756 | mmu_pages_add(pvec, sp, INVALID_INDEX); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1757 | return __mmu_unsync_walk(sp, pvec); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1758 | } |
| 1759 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1760 | static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 1761 | { |
| 1762 | WARN_ON(!sp->unsync); |
Xiao Guangrong | 5e1b3dd | 2010-04-28 11:55:06 +0800 | [diff] [blame] | 1763 | trace_kvm_mmu_sync_page(sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1764 | sp->unsync = 0; |
| 1765 | --kvm->stat.mmu_unsync; |
| 1766 | } |
| 1767 | |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 1768 | static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 1769 | struct list_head *invalid_list); |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1770 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, |
| 1771 | struct list_head *invalid_list); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1772 | |
Sean Christopherson | ac101b7 | 2020-06-23 12:40:26 -0700 | [diff] [blame] | 1773 | #define for_each_valid_sp(_kvm, _sp, _list) \ |
| 1774 | hlist_for_each_entry(_sp, _list, hash_link) \ |
Sean Christopherson | fac026d | 2019-09-12 19:46:03 -0700 | [diff] [blame] | 1775 | if (is_obsolete_sp((_kvm), (_sp))) { \ |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 1776 | } else |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1777 | |
Takuya Yoshikawa | 1044b03 | 2013-03-06 16:05:07 +0900 | [diff] [blame] | 1778 | #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \ |
Sean Christopherson | ac101b7 | 2020-06-23 12:40:26 -0700 | [diff] [blame] | 1779 | for_each_valid_sp(_kvm, _sp, \ |
| 1780 | &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \ |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 1781 | if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1782 | |
Xiao Guangrong | f918b44 | 2010-06-11 21:30:36 +0800 | [diff] [blame] | 1783 | /* @sp->gfn should be write-protected at the call site */ |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 1784 | static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
| 1785 | struct list_head *invalid_list) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1786 | { |
Sean Christopherson | 00a6697 | 2021-06-22 10:56:55 -0700 | [diff] [blame^] | 1787 | union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base; |
| 1788 | |
| 1789 | if (sp->role.gpte_is_8_bytes != mmu_role.gpte_is_8_bytes || |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 1790 | vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1791 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 1792 | return false; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1793 | } |
| 1794 | |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 1795 | return true; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1796 | } |
| 1797 | |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 1798 | static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, |
| 1799 | struct list_head *invalid_list, |
| 1800 | bool remote_flush) |
| 1801 | { |
Sean Christopherson | cfd32ac | 2019-04-12 19:55:41 -0700 | [diff] [blame] | 1802 | if (!remote_flush && list_empty(invalid_list)) |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 1803 | return false; |
| 1804 | |
| 1805 | if (!list_empty(invalid_list)) |
| 1806 | kvm_mmu_commit_zap_page(kvm, invalid_list); |
| 1807 | else |
| 1808 | kvm_flush_remote_tlbs(kvm); |
| 1809 | return true; |
| 1810 | } |
| 1811 | |
Paolo Bonzini | 35a7051 | 2016-02-24 10:03:27 +0100 | [diff] [blame] | 1812 | static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, |
| 1813 | struct list_head *invalid_list, |
| 1814 | bool remote_flush, bool local_flush) |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1815 | { |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 1816 | if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush)) |
Paolo Bonzini | 35a7051 | 2016-02-24 10:03:27 +0100 | [diff] [blame] | 1817 | return; |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1818 | |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 1819 | if (local_flush) |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 1820 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1821 | } |
| 1822 | |
Xiao Guangrong | e37fa78 | 2011-11-30 17:43:24 +0800 | [diff] [blame] | 1823 | #ifdef CONFIG_KVM_MMU_AUDIT |
| 1824 | #include "mmu_audit.c" |
| 1825 | #else |
| 1826 | static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } |
| 1827 | static void mmu_audit_disable(void) { } |
| 1828 | #endif |
| 1829 | |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 1830 | static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 1831 | { |
Sean Christopherson | fac026d | 2019-09-12 19:46:03 -0700 | [diff] [blame] | 1832 | return sp->role.invalid || |
| 1833 | unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 1834 | } |
| 1835 | |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 1836 | static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1837 | struct list_head *invalid_list) |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1838 | { |
Paolo Bonzini | 9a43c5d | 2016-02-24 10:28:01 +0100 | [diff] [blame] | 1839 | kvm_unlink_unsync_page(vcpu->kvm, sp); |
| 1840 | return __kvm_sync_page(vcpu, sp, invalid_list); |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1841 | } |
| 1842 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1843 | struct mmu_page_path { |
Yu Zhang | 2a7266a | 2017-08-24 20:27:54 +0800 | [diff] [blame] | 1844 | struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL]; |
| 1845 | unsigned int idx[PT64_ROOT_MAX_LEVEL]; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1846 | }; |
| 1847 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1848 | #define for_each_sp(pvec, sp, parents, i) \ |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 1849 | for (i = mmu_pages_first(&pvec, &parents); \ |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1850 | i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ |
| 1851 | i = mmu_pages_next(&pvec, &parents, i)) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1852 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 1853 | static int mmu_pages_next(struct kvm_mmu_pages *pvec, |
| 1854 | struct mmu_page_path *parents, |
| 1855 | int i) |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1856 | { |
| 1857 | int n; |
| 1858 | |
| 1859 | for (n = i+1; n < pvec->nr; n++) { |
| 1860 | struct kvm_mmu_page *sp = pvec->page[n].sp; |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 1861 | unsigned idx = pvec->page[n].idx; |
| 1862 | int level = sp->role.level; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1863 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 1864 | parents->idx[level-1] = idx; |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1865 | if (level == PG_LEVEL_4K) |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 1866 | break; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1867 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 1868 | parents->parent[level-2] = sp; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1869 | } |
| 1870 | |
| 1871 | return n; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1872 | } |
| 1873 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 1874 | static int mmu_pages_first(struct kvm_mmu_pages *pvec, |
| 1875 | struct mmu_page_path *parents) |
| 1876 | { |
| 1877 | struct kvm_mmu_page *sp; |
| 1878 | int level; |
| 1879 | |
| 1880 | if (pvec->nr == 0) |
| 1881 | return 0; |
| 1882 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 1883 | WARN_ON(pvec->page[0].idx != INVALID_INDEX); |
| 1884 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 1885 | sp = pvec->page[0].sp; |
| 1886 | level = sp->role.level; |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1887 | WARN_ON(level == PG_LEVEL_4K); |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 1888 | |
| 1889 | parents->parent[level-2] = sp; |
| 1890 | |
| 1891 | /* Also set up a sentinel. Further entries in pvec are all |
| 1892 | * children of sp, so this element is never overwritten. |
| 1893 | */ |
| 1894 | parents->parent[level-1] = NULL; |
| 1895 | return mmu_pages_next(pvec, parents, 0); |
| 1896 | } |
| 1897 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 1898 | static void mmu_pages_clear_parents(struct mmu_page_path *parents) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1899 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1900 | struct kvm_mmu_page *sp; |
| 1901 | unsigned int level = 0; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1902 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1903 | do { |
| 1904 | unsigned int idx = parents->idx[level]; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1905 | sp = parents->parent[level]; |
| 1906 | if (!sp) |
| 1907 | return; |
| 1908 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 1909 | WARN_ON(idx == INVALID_INDEX); |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 1910 | clear_unsync_child_bit(sp, idx); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1911 | level++; |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 1912 | } while (!sp->unsync_children); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1913 | } |
| 1914 | |
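/*
 * Worked example for the mmu_page_path bookkeeping above (illustrative,
 * made-up indices): suppose the walk collects an unsync 4K leaf page whose
 * level-2 parent is A (the leaf's entry is A->spt[7]) and whose level-3
 * grandparent is B (A's entry is B->spt[3]).  mmu_pages_first() and
 * mmu_pages_next() then leave:
 *
 *	parents->parent[0] = A,  parents->idx[0] = 7
 *	parents->parent[1] = B,  parents->idx[1] = 3
 *
 * so mmu_pages_clear_parents() clears bit 7 in A's unsync_child_bitmap; if
 * that was A's last unsync child it moves up and clears bit 3 in B, and so
 * on until it reaches an ancestor that still has other unsync children or
 * hits the NULL sentinel installed by mmu_pages_first().
 */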
| 1915 | static void mmu_sync_children(struct kvm_vcpu *vcpu, |
| 1916 | struct kvm_mmu_page *parent) |
| 1917 | { |
| 1918 | int i; |
| 1919 | struct kvm_mmu_page *sp; |
| 1920 | struct mmu_page_path parents; |
| 1921 | struct kvm_mmu_pages pages; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1922 | LIST_HEAD(invalid_list); |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 1923 | bool flush = false; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1924 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1925 | while (mmu_unsync_walk(parent, &pages)) { |
Xiao Guangrong | 2f84569 | 2012-06-20 15:56:53 +0800 | [diff] [blame] | 1926 | bool protected = false; |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 1927 | |
| 1928 | for_each_sp(pages, sp, parents, i) |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 1929 | protected |= rmap_write_protect(vcpu, sp->gfn); |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 1930 | |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 1931 | if (protected) { |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 1932 | kvm_flush_remote_tlbs(vcpu->kvm); |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 1933 | flush = false; |
| 1934 | } |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 1935 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1936 | for_each_sp(pages, sp, parents, i) { |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 1937 | flush |= kvm_sync_page(vcpu, sp, &invalid_list); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1938 | mmu_pages_clear_parents(&parents); |
| 1939 | } |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 1940 | if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 1941 | kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 1942 | cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 1943 | flush = false; |
| 1944 | } |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1945 | } |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 1946 | |
| 1947 | kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1948 | } |
| 1949 | |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 1950 | static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) |
| 1951 | { |
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 1952 | atomic_set(&sp->write_flooding_count, 0); |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 1953 | } |
| 1954 | |
| 1955 | static void clear_sp_write_flooding_count(u64 *spte) |
| 1956 | { |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 1957 | __clear_sp_write_flooding_count(sptep_to_sp(spte)); |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 1958 | } |
| 1959 | |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1960 | static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, |
| 1961 | gfn_t gfn, |
| 1962 | gva_t gaddr, |
| 1963 | unsigned level, |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 1964 | int direct, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 1965 | unsigned int access) |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1966 | { |
Sean Christopherson | fb58a9c | 2020-06-23 12:40:27 -0700 | [diff] [blame] | 1967 | bool direct_mmu = vcpu->arch.mmu->direct_map; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1968 | union kvm_mmu_page_role role; |
Sean Christopherson | ac101b7 | 2020-06-23 12:40:26 -0700 | [diff] [blame] | 1969 | struct hlist_head *sp_list; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1970 | unsigned quadrant; |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1971 | struct kvm_mmu_page *sp; |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 1972 | int collisions = 0; |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 1973 | LIST_HEAD(invalid_list); |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1974 | |
Vitaly Kuznetsov | 36d9594d | 2018-10-08 21:28:10 +0200 | [diff] [blame] | 1975 | role = vcpu->arch.mmu->mmu_role.base; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1976 | role.level = level; |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 1977 | role.direct = direct; |
Avi Kivity | 84b0c8c | 2010-03-14 10:16:40 +0200 | [diff] [blame] | 1978 | if (role.direct) |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 1979 | role.gpte_is_8_bytes = true; |
Avi Kivity | 41074d0 | 2007-12-09 17:00:02 +0200 | [diff] [blame] | 1980 | role.access = access; |
Sean Christopherson | fb58a9c | 2020-06-23 12:40:27 -0700 | [diff] [blame] | 1981 | if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1982 | quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); |
| 1983 | quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; |
| 1984 | role.quadrant = quadrant; |
| 1985 | } |
Sean Christopherson | ac101b7 | 2020-06-23 12:40:26 -0700 | [diff] [blame] | 1986 | |
| 1987 | sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]; |
| 1988 | for_each_valid_sp(vcpu->kvm, sp, sp_list) { |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 1989 | if (sp->gfn != gfn) { |
| 1990 | collisions++; |
| 1991 | continue; |
| 1992 | } |
| 1993 | |
Sean Christopherson | ddc16ab | 2021-06-22 10:56:54 -0700 | [diff] [blame] | 1994 | if (sp->role.word != role.word) { |
| 1995 | /* |
| 1996 | * If the guest is creating an upper-level page, zap |
| 1997 | * unsync pages for the same gfn. While it's possible |
| 1998 | * the guest is using recursive page tables, in all |
| 1999 | * likelihood the guest has stopped using the unsync |
| 2000 | * page and is installing a completely unrelated page. |
| 2001 | * Unsync pages must not be left as is, because the new |
| 2002 | * upper-level page will be write-protected. |
| 2003 | */ |
| 2004 | if (level > PG_LEVEL_4K && sp->unsync) |
| 2005 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, |
| 2006 | &invalid_list); |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2007 | continue; |
Sean Christopherson | ddc16ab | 2021-06-22 10:56:54 -0700 | [diff] [blame] | 2008 | } |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2009 | |
Sean Christopherson | fb58a9c | 2020-06-23 12:40:27 -0700 | [diff] [blame] | 2010 | if (direct_mmu) |
| 2011 | goto trace_get_page; |
| 2012 | |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2013 | if (sp->unsync) { |
| 2014 | /* The page is good, but __kvm_sync_page might still end |
| 2015 | * up zapping it. If so, break in order to rebuild it. |
| 2016 | */ |
| 2017 | if (!__kvm_sync_page(vcpu, sp, &invalid_list)) |
| 2018 | break; |
| 2019 | |
| 2020 | WARN_ON(!list_empty(&invalid_list)); |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 2021 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2022 | } |
Xiao Guangrong | e02aa90 | 2010-05-15 18:52:34 +0800 | [diff] [blame] | 2023 | |
Takuya Yoshikawa | 98bba23 | 2015-11-26 21:14:34 +0900 | [diff] [blame] | 2024 | if (sp->unsync_children) |
Lai Jiangshan | f6f6195 | 2020-09-02 21:54:21 +0800 | [diff] [blame] | 2025 | kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); |
Xiao Guangrong | e02aa90 | 2010-05-15 18:52:34 +0800 | [diff] [blame] | 2026 | |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 2027 | __clear_sp_write_flooding_count(sp); |
Sean Christopherson | fb58a9c | 2020-06-23 12:40:27 -0700 | [diff] [blame] | 2028 | |
| 2029 | trace_get_page: |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2030 | trace_kvm_mmu_get_page(sp, false); |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2031 | goto out; |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2032 | } |
Takuya Yoshikawa | 4700579 | 2015-11-20 17:46:29 +0900 | [diff] [blame] | 2033 | |
Avi Kivity | dfc5aa0 | 2007-12-18 19:47:18 +0200 | [diff] [blame] | 2034 | ++vcpu->kvm->stat.mmu_cache_miss; |
Takuya Yoshikawa | 4700579 | 2015-11-20 17:46:29 +0900 | [diff] [blame] | 2035 | |
| 2036 | sp = kvm_mmu_alloc_page(vcpu, direct); |
| 2037 | |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2038 | sp->gfn = gfn; |
| 2039 | sp->role = role; |
Sean Christopherson | ac101b7 | 2020-06-23 12:40:26 -0700 | [diff] [blame] | 2040 | hlist_add_head(&sp->hash_link, sp_list); |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 2041 | if (!direct) { |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 2042 | account_shadowed(vcpu->kvm, sp); |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2043 | if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn)) |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 2044 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2045 | } |
Avi Kivity | f691fe1 | 2009-07-06 15:58:14 +0300 | [diff] [blame] | 2046 | trace_kvm_mmu_get_page(sp, true); |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2047 | out: |
Sean Christopherson | ddc16ab | 2021-06-22 10:56:54 -0700 | [diff] [blame] | 2048 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
| 2049 | |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2050 | if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions) |
| 2051 | vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions; |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2052 | return sp; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2053 | } |
| 2054 | |
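/*
 * Worked example for the quadrant computation above (illustrative numbers):
 * a 32-bit non-PAE guest page table holds 1024 4-byte entries, but a shadow
 * page holds only 512 8-byte SPTEs, so one guest table is shadowed by
 * multiple pages distinguished by role.quadrant.  With level == PG_LEVEL_4K
 * and gaddr == 0xC0300000:
 *
 *	quadrant = gaddr >> (PAGE_SHIFT + 9 * 1)	= 0x601
 *	quadrant &= (1 << ((10 - 9) * 1)) - 1		= 0x601 & 0x1 = 1
 *
 * i.e. this shadow page covers the upper half of the guest page table.  At
 * level 2, two bits survive the mask, selecting one of the four quarters of
 * the guest page directory (hence "quadrant").
 */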
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2055 | static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator, |
| 2056 | struct kvm_vcpu *vcpu, hpa_t root, |
| 2057 | u64 addr) |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2058 | { |
| 2059 | iterator->addr = addr; |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2060 | iterator->shadow_addr = root; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2061 | iterator->level = vcpu->arch.mmu->shadow_root_level; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2062 | |
Yu Zhang | 2a7266a | 2017-08-24 20:27:54 +0800 | [diff] [blame] | 2063 | if (iterator->level == PT64_ROOT_4LEVEL && |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2064 | vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && |
| 2065 | !vcpu->arch.mmu->direct_map) |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2066 | --iterator->level; |
| 2067 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2068 | if (iterator->level == PT32E_ROOT_LEVEL) { |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2069 | /* |
| 2070 | * prev_root is currently only used for 64-bit hosts. So only |
| 2071 | * the active root_hpa is valid here. |
| 2072 | */ |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2073 | BUG_ON(root != vcpu->arch.mmu->root_hpa); |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2074 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2075 | iterator->shadow_addr |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2076 | = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2077 | iterator->shadow_addr &= PT64_BASE_ADDR_MASK; |
| 2078 | --iterator->level; |
| 2079 | if (!iterator->shadow_addr) |
| 2080 | iterator->level = 0; |
| 2081 | } |
| 2082 | } |
| 2083 | |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2084 | static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, |
| 2085 | struct kvm_vcpu *vcpu, u64 addr) |
| 2086 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2087 | shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa, |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2088 | addr); |
| 2089 | } |
| 2090 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2091 | static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) |
| 2092 | { |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2093 | if (iterator->level < PG_LEVEL_4K) |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2094 | return false; |
Marcelo Tosatti | 4d88954 | 2009-06-11 12:07:41 -0300 | [diff] [blame] | 2095 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2096 | iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); |
| 2097 | iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; |
| 2098 | return true; |
| 2099 | } |
| 2100 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2101 | static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, |
| 2102 | u64 spte) |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2103 | { |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2104 | if (is_last_spte(spte, iterator->level)) { |
Xiao Guangrong | 052331b | 2011-07-12 03:21:17 +0800 | [diff] [blame] | 2105 | iterator->level = 0; |
| 2106 | return; |
| 2107 | } |
| 2108 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2109 | iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2110 | --iterator->level; |
| 2111 | } |
| 2112 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2113 | static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) |
| 2114 | { |
David Hildenbrand | bb606a9 | 2017-08-24 20:51:23 +0200 | [diff] [blame] | 2115 | __shadow_walk_next(iterator, *iterator->sptep); |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2116 | } |
| 2117 | |
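/*
 * Illustrative sketch, not part of the upstream file: an open-coded use of
 * the shadow walk iterator above.  Real callers in this file wrap this
 * pattern in the for_each_shadow_entry()-style macros; the loop below only
 * shows what the init/okay/next helpers amount to.  The caller is assumed
 * to hold mmu_lock (or to be inside a lockless walk section) so the shadow
 * pages cannot be freed underneath it.
 */
static u64 example_get_leaf_spte(struct kvm_vcpu *vcpu, u64 addr)
{
	struct kvm_shadow_walk_iterator it;
	u64 spte = 0ull;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it);
	     shadow_walk_next(&it)) {
		spte = *it.sptep;
		/* Stop at the first non-present or leaf entry. */
		if (!is_shadow_present_pte(spte) ||
		    is_last_spte(spte, it.level))
			break;
	}

	return spte;
}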
Takuya Yoshikawa | 98bba23 | 2015-11-26 21:14:34 +0900 | [diff] [blame] | 2118 | static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, |
| 2119 | struct kvm_mmu_page *sp) |
Avi Kivity | 32ef26a | 2010-07-13 14:27:04 +0300 | [diff] [blame] | 2120 | { |
| 2121 | u64 spte; |
| 2122 | |
Bandan Das | ffb128c | 2016-07-12 18:18:49 -0400 | [diff] [blame] | 2123 | BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK); |
Yang Zhang | 7a1638c | 2013-08-05 11:07:13 +0300 | [diff] [blame] | 2124 | |
Ben Gardon | cc4674d | 2020-09-25 14:22:48 -0700 | [diff] [blame] | 2125 | spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp)); |
Xiao Guangrong | 24db273 | 2013-02-05 15:28:02 +0800 | [diff] [blame] | 2126 | |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 2127 | mmu_spte_set(sptep, spte); |
Takuya Yoshikawa | 98bba23 | 2015-11-26 21:14:34 +0900 | [diff] [blame] | 2128 | |
| 2129 | mmu_page_add_parent_pte(vcpu, sp, sptep); |
| 2130 | |
| 2131 | if (sp->unsync_children || sp->unsync) |
| 2132 | mark_unsync(sptep); |
Avi Kivity | 32ef26a | 2010-07-13 14:27:04 +0300 | [diff] [blame] | 2133 | } |
| 2134 | |
Avi Kivity | a357bd2 | 2010-07-13 14:27:07 +0300 | [diff] [blame] | 2135 | static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
| 2136 | unsigned direct_access) |
| 2137 | { |
| 2138 | if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { |
| 2139 | struct kvm_mmu_page *child; |
| 2140 | |
| 2141 | /* |
 | 2142 | 		 * For a direct sp, if the guest pte's dirty bit
 | 2143 | 		 * changed from clean to dirty, it would corrupt the
 | 2144 | 		 * sp's access: writes would be allowed through a
 | 2145 | 		 * read-only sp.  Update the spte here so that a new
 | 2146 | 		 * sp with the correct access is used instead.
| 2147 | */ |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 2148 | child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK); |
Avi Kivity | a357bd2 | 2010-07-13 14:27:07 +0300 | [diff] [blame] | 2149 | if (child->role.access == direct_access) |
| 2150 | return; |
| 2151 | |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2152 | drop_parent_pte(child, sptep); |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 2153 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1); |
Avi Kivity | a357bd2 | 2010-07-13 14:27:07 +0300 | [diff] [blame] | 2154 | } |
| 2155 | } |
| 2156 | |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2157 | /* Returns the number of zapped non-leaf child shadow pages. */ |
| 2158 | static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 2159 | u64 *spte, struct list_head *invalid_list) |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2160 | { |
| 2161 | u64 pte; |
| 2162 | struct kvm_mmu_page *child; |
| 2163 | |
| 2164 | pte = *spte; |
| 2165 | if (is_shadow_present_pte(pte)) { |
Xiao Guangrong | 505aef8 | 2011-09-22 16:56:06 +0800 | [diff] [blame] | 2166 | if (is_last_spte(pte, sp->role.level)) { |
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 2167 | drop_spte(kvm, spte); |
Xiao Guangrong | 505aef8 | 2011-09-22 16:56:06 +0800 | [diff] [blame] | 2168 | if (is_large_pte(pte)) |
| 2169 | --kvm->stat.lpages; |
| 2170 | } else { |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 2171 | child = to_shadow_page(pte & PT64_BASE_ADDR_MASK); |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2172 | drop_parent_pte(child, spte); |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2173 | |
| 2174 | /* |
 | 2175 | 			 * Recursively zap nested TDP SPs; parentless SPs are
| 2176 | * unlikely to be used again in the near future. This |
| 2177 | * avoids retaining a large number of stale nested SPs. |
| 2178 | */ |
| 2179 | if (tdp_enabled && invalid_list && |
| 2180 | child->role.guest_mode && !child->parent_ptes.val) |
| 2181 | return kvm_mmu_prepare_zap_page(kvm, child, |
| 2182 | invalid_list); |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2183 | } |
Sean Christopherson | ace569e | 2020-09-23 15:14:05 -0700 | [diff] [blame] | 2184 | } else if (is_mmio_spte(pte)) { |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 2185 | mmu_spte_clear_no_track(spte); |
Sean Christopherson | ace569e | 2020-09-23 15:14:05 -0700 | [diff] [blame] | 2186 | } |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2187 | return 0; |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2188 | } |
| 2189 | |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2190 | static int kvm_mmu_page_unlink_children(struct kvm *kvm, |
| 2191 | struct kvm_mmu_page *sp, |
| 2192 | struct list_head *invalid_list) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2193 | { |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2194 | int zapped = 0; |
Avi Kivity | 697fe2e | 2007-01-05 16:36:46 -0800 | [diff] [blame] | 2195 | unsigned i; |
Avi Kivity | 697fe2e | 2007-01-05 16:36:46 -0800 | [diff] [blame] | 2196 | |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2197 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2198 | zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list); |
| 2199 | |
| 2200 | return zapped; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2201 | } |
| 2202 | |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2203 | static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2204 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 2205 | u64 *sptep; |
| 2206 | struct rmap_iterator iter; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2207 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 2208 | while ((sptep = rmap_get_first(&sp->parent_ptes, &iter))) |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 2209 | drop_parent_pte(sp, sptep); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2210 | } |
| 2211 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2212 | static int mmu_zap_unsync_children(struct kvm *kvm, |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2213 | struct kvm_mmu_page *parent, |
| 2214 | struct list_head *invalid_list) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2215 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2216 | int i, zapped = 0; |
| 2217 | struct mmu_page_path parents; |
| 2218 | struct kvm_mmu_pages pages; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2219 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2220 | if (parent->role.level == PG_LEVEL_4K) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2221 | return 0; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2222 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2223 | while (mmu_unsync_walk(parent, &pages)) { |
| 2224 | struct kvm_mmu_page *sp; |
| 2225 | |
| 2226 | for_each_sp(pages, sp, parents, i) { |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2227 | kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2228 | mmu_pages_clear_parents(&parents); |
Xiao Guangrong | 77662e0 | 2010-04-16 16:34:42 +0800 | [diff] [blame] | 2229 | zapped++; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2230 | } |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2231 | } |
| 2232 | |
| 2233 | return zapped; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2234 | } |
| 2235 | |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2236 | static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, |
| 2237 | struct kvm_mmu_page *sp, |
| 2238 | struct list_head *invalid_list, |
| 2239 | int *nr_zapped) |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2240 | { |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2241 | bool list_unstable; |
Avi Kivity | f691fe1 | 2009-07-06 15:58:14 +0300 | [diff] [blame] | 2242 | |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2243 | trace_kvm_mmu_prepare_zap_page(sp); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2244 | ++kvm->stat.mmu_shadow_zapped; |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2245 | *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2246 | *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2247 | kvm_mmu_unlink_parents(kvm, sp); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 2248 | |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2249 | /* Zapping children means active_mmu_pages has become unstable. */ |
| 2250 | list_unstable = *nr_zapped; |
| 2251 | |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 2252 | if (!sp->role.invalid && !sp->role.direct) |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 2253 | unaccount_shadowed(kvm, sp); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 2254 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2255 | if (sp->unsync) |
| 2256 | kvm_unlink_unsync_page(kvm, sp); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2257 | if (!sp->root_count) { |
Gui Jianfeng | 54a4f02 | 2010-05-05 09:03:49 +0800 | [diff] [blame] | 2258 | /* Count self */ |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2259 | (*nr_zapped)++; |
Sean Christopherson | f95eec9 | 2020-06-23 12:35:39 -0700 | [diff] [blame] | 2260 | |
| 2261 | /* |
| 2262 | * Already invalid pages (previously active roots) are not on |
| 2263 | * the active page list. See list_del() in the "else" case of |
| 2264 | * !sp->root_count. |
| 2265 | */ |
| 2266 | if (sp->role.invalid) |
| 2267 | list_add(&sp->link, invalid_list); |
| 2268 | else |
| 2269 | list_move(&sp->link, invalid_list); |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame] | 2270 | kvm_mod_used_mmu_pages(kvm, -1); |
Marcelo Tosatti | 2e53d63 | 2008-02-20 14:47:24 -0500 | [diff] [blame] | 2271 | } else { |
Sean Christopherson | f95eec9 | 2020-06-23 12:35:39 -0700 | [diff] [blame] | 2272 | /* |
 | 2273 | 		 * Remove the active root from the active page list; the root
| 2274 | * will be explicitly freed when the root_count hits zero. |
| 2275 | */ |
| 2276 | list_del(&sp->link); |
Gleb Natapov | 05988d7 | 2013-05-31 08:36:30 +0800 | [diff] [blame] | 2277 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 2278 | /* |
 | 2279 | 		 * Obsolete pages cannot be used on any vCPUs; see the comment
| 2280 | * in kvm_mmu_zap_all_fast(). Note, is_obsolete_sp() also |
| 2281 | * treats invalid shadow pages as being obsolete. |
| 2282 | */ |
| 2283 | if (!is_obsolete_sp(kvm, sp)) |
Gleb Natapov | 05988d7 | 2013-05-31 08:36:30 +0800 | [diff] [blame] | 2284 | kvm_reload_remote_mmus(kvm); |
Marcelo Tosatti | 2e53d63 | 2008-02-20 14:47:24 -0500 | [diff] [blame] | 2285 | } |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2286 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 2287 | if (sp->lpage_disallowed) |
| 2288 | unaccount_huge_nx_page(kvm, sp); |
| 2289 | |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2290 | sp->role.invalid = 1; |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2291 | return list_unstable; |
| 2292 | } |
| 2293 | |
| 2294 | static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 2295 | struct list_head *invalid_list) |
| 2296 | { |
| 2297 | int nr_zapped; |
| 2298 | |
| 2299 | __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped); |
| 2300 | return nr_zapped; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2301 | } |
| 2302 | |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2303 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, |
| 2304 | struct list_head *invalid_list) |
| 2305 | { |
Takuya Yoshikawa | 945315b | 2013-03-06 16:05:52 +0900 | [diff] [blame] | 2306 | struct kvm_mmu_page *sp, *nsp; |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2307 | |
| 2308 | if (list_empty(invalid_list)) |
| 2309 | return; |
| 2310 | |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 2311 | /* |
Lan Tianyu | 9753f52 | 2016-03-13 11:10:24 +0800 | [diff] [blame] | 2312 | 	 * We need to make sure everyone sees our modifications to
 | 2313 | 	 * the page tables and sees changes to vcpu->mode here. The barrier
 | 2314 | 	 * in kvm_flush_remote_tlbs() achieves this. This pairs
 | 2315 | 	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
 | 2316 | 	 *
 | 2317 | 	 * In addition, kvm_flush_remote_tlbs() waits for all vcpus to exit
 | 2318 | 	 * guest mode and/or finish lockless shadow page table walks.
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 2319 | */ |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2320 | kvm_flush_remote_tlbs(kvm); |
| 2321 | |
Takuya Yoshikawa | 945315b | 2013-03-06 16:05:52 +0900 | [diff] [blame] | 2322 | list_for_each_entry_safe(sp, nsp, invalid_list, link) { |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2323 | WARN_ON(!sp->role.invalid || sp->root_count); |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame] | 2324 | kvm_mmu_free_page(sp); |
Takuya Yoshikawa | 945315b | 2013-03-06 16:05:52 +0900 | [diff] [blame] | 2325 | } |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2326 | } |
| 2327 | |
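/*
 * Illustrative sketch, not part of the upstream file: the prepare/commit
 * pattern implemented by the two functions above.  Pages are detached and
 * queued on a local invalid_list while mmu_lock is held for write, then a
 * single remote TLB flush in kvm_mmu_commit_zap_page() covers the whole
 * batch before the pages are actually freed.  kvm_mmu_unprotect_page()
 * below does the same thing for every shadow page matching a gfn.
 */
static void example_zap_single_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	LIST_HEAD(invalid_list);

	write_lock(&kvm->mmu_lock);
	kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	write_unlock(&kvm->mmu_lock);
}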
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2328 | static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm, |
| 2329 | unsigned long nr_to_zap) |
Takuya Yoshikawa | 5da5960 | 2013-03-06 16:06:58 +0900 | [diff] [blame] | 2330 | { |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2331 | unsigned long total_zapped = 0; |
| 2332 | struct kvm_mmu_page *sp, *tmp; |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2333 | LIST_HEAD(invalid_list); |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2334 | bool unstable; |
| 2335 | int nr_zapped; |
Takuya Yoshikawa | 5da5960 | 2013-03-06 16:06:58 +0900 | [diff] [blame] | 2336 | |
| 2337 | if (list_empty(&kvm->arch.active_mmu_pages)) |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2338 | return 0; |
Takuya Yoshikawa | 5da5960 | 2013-03-06 16:06:58 +0900 | [diff] [blame] | 2339 | |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2340 | restart: |
Sean Christopherson | 8fc5172 | 2021-01-13 12:50:30 -0800 | [diff] [blame] | 2341 | list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) { |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2342 | /* |
 | 2343 | 		 * Don't zap active root pages; the page itself can't be freed
| 2344 | * and zapping it will just force vCPUs to realloc and reload. |
| 2345 | */ |
| 2346 | if (sp->root_count) |
| 2347 | continue; |
| 2348 | |
| 2349 | unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, |
| 2350 | &nr_zapped); |
| 2351 | total_zapped += nr_zapped; |
| 2352 | if (total_zapped >= nr_to_zap) |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2353 | break; |
| 2354 | |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2355 | if (unstable) |
| 2356 | goto restart; |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2357 | } |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2358 | |
| 2359 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
| 2360 | |
| 2361 | kvm->stat.mmu_recycled += total_zapped; |
| 2362 | return total_zapped; |
| 2363 | } |
| 2364 | |
Sean Christopherson | afe8d7e | 2020-06-22 13:20:30 -0700 | [diff] [blame] | 2365 | static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm) |
| 2366 | { |
| 2367 | if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages) |
| 2368 | return kvm->arch.n_max_mmu_pages - |
| 2369 | kvm->arch.n_used_mmu_pages; |
| 2370 | |
| 2371 | return 0; |
Takuya Yoshikawa | 5da5960 | 2013-03-06 16:06:58 +0900 | [diff] [blame] | 2372 | } |
| 2373 | |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2374 | static int make_mmu_pages_available(struct kvm_vcpu *vcpu) |
| 2375 | { |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2376 | unsigned long avail = kvm_mmu_available_pages(vcpu->kvm); |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2377 | |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2378 | if (likely(avail >= KVM_MIN_FREE_MMU_PAGES)) |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2379 | return 0; |
| 2380 | |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2381 | kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail); |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2382 | |
Sean Christopherson | 6e6ec58 | 2021-03-04 17:10:50 -0800 | [diff] [blame] | 2383 | /* |
 | 2384 | 	 * Note, this check is intentionally soft; it only guarantees that one
| 2385 | * page is available, while the caller may end up allocating as many as |
| 2386 | * four pages, e.g. for PAE roots or for 5-level paging. Temporarily |
| 2387 | * exceeding the (arbitrary by default) limit will not harm the host, |
 | 2388 | 	 * being too aggressive may unnecessarily kill the guest, and getting an
| 2389 | * exact count is far more trouble than it's worth, especially in the |
| 2390 | * page fault paths. |
| 2391 | */ |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2392 | if (!kvm_mmu_available_pages(vcpu->kvm)) |
| 2393 | return -ENOSPC; |
| 2394 | return 0; |
| 2395 | } |
| 2396 | |
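/*
 * Worked example for the refill logic above (illustrative numbers;
 * KVM_MIN_FREE_MMU_PAGES and KVM_REFILL_PAGES are defined elsewhere,
 * historically 5 and 25): with n_max_mmu_pages == 1000 and
 * n_used_mmu_pages == 997, kvm_mmu_available_pages() returns 3, which is
 * below the minimum, so make_mmu_pages_available() asks
 * kvm_mmu_zap_oldest_mmu_pages() for 25 - 3 = 22 of the oldest pages,
 * refilling the pool well past the minimum rather than zapping a single
 * page on every fault.
 */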
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2397 | /* |
| 2398 | * Changing the number of mmu pages allocated to the vm |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2399 |  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2400 | */ |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 2401 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2402 | { |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 2403 | write_lock(&kvm->mmu_lock); |
Takuya Yoshikawa | b34cb59 | 2013-01-08 19:46:07 +0900 | [diff] [blame] | 2404 | |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2405 | if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2406 | kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages - |
| 2407 | goal_nr_mmu_pages); |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2408 | |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2409 | goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2410 | } |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2411 | |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2412 | kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; |
Takuya Yoshikawa | b34cb59 | 2013-01-08 19:46:07 +0900 | [diff] [blame] | 2413 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 2414 | write_unlock(&kvm->mmu_lock); |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2415 | } |
| 2416 | |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 2417 | int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2418 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2419 | struct kvm_mmu_page *sp; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2420 | LIST_HEAD(invalid_list); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2421 | int r; |
| 2422 | |
Xiao Guangrong | 9ad17b10 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 2423 | pgprintk("%s: looking for gfn %llx\n", __func__, gfn); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2424 | r = 0; |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 2425 | write_lock(&kvm->mmu_lock); |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 2426 | for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { |
Xiao Guangrong | 9ad17b10 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 2427 | pgprintk("%s: gfn %llx role %x\n", __func__, gfn, |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2428 | sp->role.word); |
| 2429 | r = 1; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 2430 | kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2431 | } |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2432 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 2433 | write_unlock(&kvm->mmu_lock); |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 2434 | |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2435 | return r; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2436 | } |
Sean Christopherson | 96ad91a | 2021-02-12 16:50:15 -0800 | [diff] [blame] | 2437 | |
| 2438 | static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) |
| 2439 | { |
| 2440 | gpa_t gpa; |
| 2441 | int r; |
| 2442 | |
| 2443 | if (vcpu->arch.mmu->direct_map) |
| 2444 | return 0; |
| 2445 | |
| 2446 | gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); |
| 2447 | |
| 2448 | r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); |
| 2449 | |
| 2450 | return r; |
| 2451 | } |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2452 | |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2453 | static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2454 | { |
Xiao Guangrong | 5e1b3dd | 2010-04-28 11:55:06 +0800 | [diff] [blame] | 2455 | trace_kvm_mmu_unsync_page(sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2456 | ++vcpu->kvm->stat.mmu_unsync; |
| 2457 | sp->unsync = 1; |
Marcelo Tosatti | 6cffe8c | 2008-12-01 22:32:04 -0200 | [diff] [blame] | 2458 | |
Xiao Guangrong | 6b18493 | 2010-04-16 21:29:17 +0800 | [diff] [blame] | 2459 | kvm_mmu_mark_parents_unsync(sp); |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2460 | } |
| 2461 | |
Paolo Bonzini | 5a9624a | 2020-10-16 10:29:37 -0400 | [diff] [blame] | 2462 | bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2463 | bool can_unsync) |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2464 | { |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2465 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2466 | |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2467 | if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) |
| 2468 | return true; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2469 | |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2470 | for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { |
Xiao Guangrong | 36a2e67 | 2010-06-30 16:02:02 +0800 | [diff] [blame] | 2471 | if (!can_unsync) |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2472 | return true; |
Xiao Guangrong | 36a2e67 | 2010-06-30 16:02:02 +0800 | [diff] [blame] | 2473 | |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2474 | if (sp->unsync) |
| 2475 | continue; |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2476 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2477 | WARN_ON(sp->role.level != PG_LEVEL_4K); |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2478 | kvm_unsync_page(vcpu, sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2479 | } |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2480 | |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 2481 | /* |
| 2482 | * We need to ensure that the marking of unsync pages is visible |
| 2483 | * before the SPTE is updated to allow writes because |
| 2484 | * kvm_mmu_sync_roots() checks the unsync flags without holding |
| 2485 | * the MMU lock and so can race with this. If the SPTE was updated |
| 2486 | * before the page had been marked as unsync-ed, something like the |
| 2487 | * following could happen: |
| 2488 | * |
| 2489 | * CPU 1 CPU 2 |
| 2490 | * --------------------------------------------------------------------- |
| 2491 | * 1.2 Host updates SPTE |
| 2492 | * to be writable |
| 2493 | * 2.1 Guest writes a GPTE for GVA X. |
| 2494 | * (GPTE being in the guest page table shadowed |
| 2495 | * by the SP from CPU 1.) |
| 2496 | * This reads SPTE during the page table walk. |
| 2497 | * Since SPTE.W is read as 1, there is no |
| 2498 | * fault. |
| 2499 | * |
| 2500 | * 2.2 Guest issues TLB flush. |
| 2501 | * That causes a VM Exit. |
| 2502 | * |
 | 2503 | 	 *                      2.3 kvm_mmu_sync_roots() reads sp->unsync.
 | 2504 | 	 *                          Since it is false, it just returns.
| 2505 | * |
| 2506 | * 2.4 Guest accesses GVA X. |
| 2507 | * Since the mapping in the SP was not updated, |
 | 2508 | 	 *                          the old mapping for GVA X incorrectly
| 2509 | * gets used. |
| 2510 | * 1.1 Host marks SP |
| 2511 | * as unsync |
| 2512 | * (sp->unsync = true) |
| 2513 | * |
| 2514 | * The write barrier below ensures that 1.1 happens before 1.2 and thus |
| 2515 | * the situation in 2.4 does not arise. The implicit barrier in 2.2 |
| 2516 | * pairs with this write barrier. |
| 2517 | */ |
| 2518 | smp_wmb(); |
| 2519 | |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2520 | return false; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2521 | } |
| 2522 | |
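/*
 * Illustrative sketch, not part of the upstream file: the ordering that the
 * comment and smp_wmb() above establish.  The writer below stands in for
 * mmu_need_write_protect() followed by the SPTE update done on the fault
 * path; the unsync marking must become visible before the writable SPTE
 * does.  The reader side is kvm_mmu_sync_roots(), which checks the unsync
 * flags without holding mmu_lock; the write barrier here pairs with the
 * ordering on that read side, so the marking is observed whenever the
 * guest could have observed the writable SPTE.
 */
static void example_unsync_then_map_writable(struct kvm_vcpu *vcpu,
					     struct kvm_mmu_page *sp,
					     u64 *sptep, u64 writable_spte)
{
	kvm_unsync_page(vcpu, sp);		/* 1.1: sp->unsync = 1 */
	smp_wmb();				/* order 1.1 before 1.2 */
	mmu_spte_update(sptep, writable_spte);	/* 1.2: expose writable SPTE */
}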
Ben Gardon | 799a419 | 2020-10-14 20:26:41 +0200 | [diff] [blame] | 2523 | static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
| 2524 | unsigned int pte_access, int level, |
| 2525 | gfn_t gfn, kvm_pfn_t pfn, bool speculative, |
| 2526 | bool can_unsync, bool host_writable) |
| 2527 | { |
| 2528 | u64 spte; |
| 2529 | struct kvm_mmu_page *sp; |
| 2530 | int ret; |
| 2531 | |
Ben Gardon | 799a419 | 2020-10-14 20:26:41 +0200 | [diff] [blame] | 2532 | sp = sptep_to_sp(sptep); |
| 2533 | |
| 2534 | ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative, |
| 2535 | can_unsync, host_writable, sp_ad_disabled(sp), &spte); |
| 2536 | |
| 2537 | if (spte & PT_WRITABLE_MASK) |
| 2538 | kvm_vcpu_mark_page_dirty(vcpu, gfn); |
| 2539 | |
Sean Christopherson | 1270375 | 2020-09-23 15:04:25 -0700 | [diff] [blame] | 2540 | if (*sptep == spte) |
| 2541 | ret |= SET_SPTE_SPURIOUS; |
| 2542 | else if (mmu_spte_update(sptep, spte)) |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 2543 | ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2544 | return ret; |
| 2545 | } |
| 2546 | |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 2547 | static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
Sean Christopherson | e88b809 | 2020-09-23 11:37:35 -0700 | [diff] [blame] | 2548 | unsigned int pte_access, bool write_fault, int level, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 2549 | gfn_t gfn, kvm_pfn_t pfn, bool speculative, |
| 2550 | bool host_writable) |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2551 | { |
| 2552 | int was_rmapped = 0; |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 2553 | int rmap_count; |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 2554 | int set_spte_ret; |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 2555 | int ret = RET_PF_FIXED; |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 2556 | bool flush = false; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2557 | |
Xiao Guangrong | f761620 | 2013-02-05 15:27:27 +0800 | [diff] [blame] | 2558 | pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, |
| 2559 | *sptep, write_fault, gfn); |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2560 | |
Sean Christopherson | a54aa15 | 2021-02-25 12:47:32 -0800 | [diff] [blame] | 2561 | if (unlikely(is_noslot_pfn(pfn))) { |
| 2562 | mark_mmio_spte(vcpu, sptep, gfn, pte_access); |
| 2563 | return RET_PF_EMULATE; |
| 2564 | } |
| 2565 | |
Takuya Yoshikawa | afd28fe | 2015-11-20 17:44:55 +0900 | [diff] [blame] | 2566 | if (is_shadow_present_pte(*sptep)) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2567 | /* |
| 2568 | * If we overwrite a PTE page pointer with a 2MB PMD, unlink |
| 2569 | * the parent of the now unreachable PTE. |
| 2570 | */ |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2571 | if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2572 | struct kvm_mmu_page *child; |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2573 | u64 pte = *sptep; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2574 | |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 2575 | child = to_shadow_page(pte & PT64_BASE_ADDR_MASK); |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2576 | drop_parent_pte(child, sptep); |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 2577 | flush = true; |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2578 | } else if (pfn != spte_to_pfn(*sptep)) { |
Xiao Guangrong | 9ad17b10 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 2579 | pgprintk("hfn old %llx new %llx\n", |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2580 | spte_to_pfn(*sptep), pfn); |
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 2581 | drop_spte(vcpu->kvm, sptep); |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 2582 | flush = true; |
Joerg Roedel | 6bed6b9 | 2009-02-18 14:08:59 +0100 | [diff] [blame] | 2583 | } else |
| 2584 | was_rmapped = 1; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2585 | } |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 2586 | |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 2587 | set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn, |
| 2588 | speculative, true, host_writable); |
| 2589 | if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2590 | if (write_fault) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 2591 | ret = RET_PF_EMULATE; |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 2592 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Marcelo Tosatti | a378b4e | 2008-09-23 13:18:31 -0300 | [diff] [blame] | 2593 | } |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 2594 | |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 2595 | if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush) |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 2596 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, |
| 2597 | KVM_PAGES_PER_HPAGE(level)); |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2598 | |
Sean Christopherson | 1270375 | 2020-09-23 15:04:25 -0700 | [diff] [blame] | 2599 | /* |
| 2600 | * The fault is fully spurious if and only if the new SPTE and old SPTE |
| 2601 | * are identical, and emulation is not required. |
| 2602 | */ |
| 2603 | if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) { |
| 2604 | WARN_ON_ONCE(!was_rmapped); |
| 2605 | return RET_PF_SPURIOUS; |
| 2606 | } |
| 2607 | |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2608 | pgprintk("%s: setting spte %llx\n", __func__, *sptep); |
Paolo Bonzini | 335e192 | 2019-07-01 06:22:57 -0400 | [diff] [blame] | 2609 | trace_kvm_mmu_set_spte(level, gfn, sptep); |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2610 | if (!was_rmapped && is_large_pte(*sptep)) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 2611 | ++vcpu->kvm->stat.lpages; |
| 2612 | |
Xiao Guangrong | ffb61bb | 2011-07-12 03:22:01 +0800 | [diff] [blame] | 2613 | if (is_shadow_present_pte(*sptep)) { |
Xiao Guangrong | ffb61bb | 2011-07-12 03:22:01 +0800 | [diff] [blame] | 2614 | if (!was_rmapped) { |
| 2615 | rmap_count = rmap_add(vcpu, sptep, gfn); |
| 2616 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) |
| 2617 | rmap_recycle(vcpu, sptep, gfn); |
| 2618 | } |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 2619 | } |
Xiao Guangrong | cb9aaa3 | 2012-08-03 15:42:10 +0800 | [diff] [blame] | 2620 | |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 2621 | return ret; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 2622 | } |
| 2623 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2624 | static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2625 | bool no_dirty_log) |
| 2626 | { |
| 2627 | struct kvm_memory_slot *slot; |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2628 | |
Xiao Guangrong | 5d163b1 | 2011-03-09 15:43:00 +0800 | [diff] [blame] | 2629 | slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); |
Xiao Guangrong | 903816f | 2012-07-17 21:54:11 +0800 | [diff] [blame] | 2630 | if (!slot) |
Xiao Guangrong | 6c8ee57 | 2012-08-03 15:37:54 +0800 | [diff] [blame] | 2631 | return KVM_PFN_ERR_FAULT; |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2632 | |
Xiao Guangrong | 037d92d | 2012-08-21 10:59:12 +0800 | [diff] [blame] | 2633 | return gfn_to_pfn_memslot_atomic(slot, gfn); |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2634 | } |
| 2635 | |
| 2636 | static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, |
| 2637 | struct kvm_mmu_page *sp, |
| 2638 | u64 *start, u64 *end) |
| 2639 | { |
| 2640 | struct page *pages[PTE_PREFETCH_NUM]; |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 2641 | struct kvm_memory_slot *slot; |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 2642 | unsigned int access = sp->role.access; |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2643 | int i, ret; |
| 2644 | gfn_t gfn; |
| 2645 | |
| 2646 | gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 2647 | slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK); |
| 2648 | if (!slot) |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2649 | return -1; |
| 2650 | |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 2651 | ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start); |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2652 | if (ret <= 0) |
| 2653 | return -1; |
| 2654 | |
Junaid Shahid | 43fdcda | 2019-01-03 16:22:21 -0800 | [diff] [blame] | 2655 | for (i = 0; i < ret; i++, gfn++, start++) { |
Sean Christopherson | e88b809 | 2020-09-23 11:37:35 -0700 | [diff] [blame] | 2656 | mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn, |
Takuya Yoshikawa | 029499b | 2015-11-20 17:44:05 +0900 | [diff] [blame] | 2657 | page_to_pfn(pages[i]), true, true); |
Junaid Shahid | 43fdcda | 2019-01-03 16:22:21 -0800 | [diff] [blame] | 2658 | put_page(pages[i]); |
| 2659 | } |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2660 | |
| 2661 | return 0; |
| 2662 | } |
| 2663 | |
| 2664 | static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, |
| 2665 | struct kvm_mmu_page *sp, u64 *sptep) |
| 2666 | { |
| 2667 | u64 *spte, *start = NULL; |
| 2668 | int i; |
| 2669 | |
| 2670 | WARN_ON(!sp->role.direct); |
| 2671 | |
| 2672 | i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); |
| 2673 | spte = sp->spt + i; |
| 2674 | |
| 2675 | for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { |
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 2676 | if (is_shadow_present_pte(*spte) || spte == sptep) { |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2677 | if (!start) |
| 2678 | continue; |
| 2679 | if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) |
| 2680 | break; |
| 2681 | start = NULL; |
| 2682 | } else if (!start) |
| 2683 | start = spte; |
| 2684 | } |
| 2685 | } |
| 2686 | |
| 2687 | static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) |
| 2688 | { |
| 2689 | struct kvm_mmu_page *sp; |
| 2690 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 2691 | sp = sptep_to_sp(sptep); |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 2692 | |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2693 | /* |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 2694 | * Without accessed bits, there's no way to distinguish between |
| 2695 |  * actually accessed translations and prefetched ones, so disable pte |
| 2696 | * prefetch if accessed bits aren't available. |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2697 | */ |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 2698 | if (sp_ad_disabled(sp)) |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2699 | return; |
| 2700 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2701 | if (sp->role.level > PG_LEVEL_4K) |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2702 | return; |
| 2703 | |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 2704 | /* |
| 2705 | * If addresses are being invalidated, skip prefetching to avoid |
| 2706 | * accidentally prefetching those addresses. |
| 2707 | */ |
| 2708 | if (unlikely(vcpu->kvm->mmu_notifier_count)) |
| 2709 | return; |
| 2710 | |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2711 | __direct_pte_prefetch(vcpu, sp, sptep); |
| 2712 | } |
| 2713 | |
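/*
 * Editorial note, a worked example of the window computation in
 * __direct_pte_prefetch() above (assuming PTE_PREFETCH_NUM is 8, its
 * value at the time of writing):
 *
 *	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
 *
 * For a faulting SPTE at index 13 of sp->spt:
 *
 *	13 & ~7 == 8
 *
 * so the scan covers indexes 8..15, i.e. the naturally aligned group of
 * 8 SPTEs containing the faulting entry, and direct_pte_prefetch_many()
 * is called for each run of not-yet-present entries inside that window
 * (the faulting entry itself terminates a run, like a present entry).
 */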
Sean Christopherson | 1b6d9d9 | 2021-02-12 16:50:04 -0800 | [diff] [blame] | 2714 | static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, |
Ben Gardon | 8ca6f06 | 2021-04-01 16:37:24 -0700 | [diff] [blame] | 2715 | const struct kvm_memory_slot *slot) |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 2716 | { |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 2717 | unsigned long hva; |
| 2718 | pte_t *pte; |
| 2719 | int level; |
| 2720 | |
Sean Christopherson | e851265 | 2020-01-08 12:24:48 -0800 | [diff] [blame] | 2721 | if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn)) |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2722 | return PG_LEVEL_4K; |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 2723 | |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 2724 | /* |
| 2725 | * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() |
| 2726 |  * is not solely for performance; it's also necessary to avoid the |
| 2727 | * "writable" check in __gfn_to_hva_many(), which will always fail on |
| 2728 | * read-only memslots due to gfn_to_hva() assuming writes. Earlier |
| 2729 | * page fault steps have already verified the guest isn't writing a |
| 2730 | * read-only memslot. |
| 2731 | */ |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 2732 | hva = __gfn_to_hva_memslot(slot, gfn); |
| 2733 | |
Sean Christopherson | 1b6d9d9 | 2021-02-12 16:50:04 -0800 | [diff] [blame] | 2734 | pte = lookup_address_in_mm(kvm->mm, hva, &level); |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 2735 | if (unlikely(!pte)) |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2736 | return PG_LEVEL_4K; |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 2737 | |
| 2738 | return level; |
| 2739 | } |
| 2740 | |
Ben Gardon | 8ca6f06 | 2021-04-01 16:37:24 -0700 | [diff] [blame] | 2741 | int kvm_mmu_max_mapping_level(struct kvm *kvm, |
| 2742 | const struct kvm_memory_slot *slot, gfn_t gfn, |
| 2743 | kvm_pfn_t pfn, int max_level) |
Sean Christopherson | 1b6d9d9 | 2021-02-12 16:50:04 -0800 | [diff] [blame] | 2744 | { |
| 2745 | struct kvm_lpage_info *linfo; |
| 2746 | |
| 2747 | max_level = min(max_level, max_huge_page_level); |
| 2748 | for ( ; max_level > PG_LEVEL_4K; max_level--) { |
| 2749 | linfo = lpage_info_slot(gfn, slot, max_level); |
| 2750 | if (!linfo->disallow_lpage) |
| 2751 | break; |
| 2752 | } |
| 2753 | |
| 2754 | if (max_level == PG_LEVEL_4K) |
| 2755 | return PG_LEVEL_4K; |
| 2756 | |
| 2757 | return host_pfn_mapping_level(kvm, gfn, pfn, slot); |
| 2758 | } |
| 2759 | |
Ben Gardon | bb18842 | 2020-10-14 11:26:50 -0700 | [diff] [blame] | 2760 | int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2761 | int max_level, kvm_pfn_t *pfnp, |
| 2762 | bool huge_page_disallowed, int *req_level) |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 2763 | { |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 2764 | struct kvm_memory_slot *slot; |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 2765 | kvm_pfn_t pfn = *pfnp; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 2766 | kvm_pfn_t mask; |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 2767 | int level; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 2768 | |
Sean Christopherson | 3cf0661 | 2020-09-23 11:37:31 -0700 | [diff] [blame] | 2769 | *req_level = PG_LEVEL_4K; |
| 2770 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2771 | if (unlikely(max_level == PG_LEVEL_4K)) |
| 2772 | return PG_LEVEL_4K; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 2773 | |
Sean Christopherson | e851265 | 2020-01-08 12:24:48 -0800 | [diff] [blame] | 2774 | if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn)) |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2775 | return PG_LEVEL_4K; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 2776 | |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 2777 | slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true); |
| 2778 | if (!slot) |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2779 | return PG_LEVEL_4K; |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 2780 | |
Sean Christopherson | 1b6d9d9 | 2021-02-12 16:50:04 -0800 | [diff] [blame] | 2781 | level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level); |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2782 | if (level == PG_LEVEL_4K) |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 2783 | return level; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 2784 | |
Sean Christopherson | 3cf0661 | 2020-09-23 11:37:31 -0700 | [diff] [blame] | 2785 | *req_level = level = min(level, max_level); |
| 2786 | |
| 2787 | /* |
| 2788 | * Enforce the iTLB multihit workaround after capturing the requested |
| 2789 | * level, which will be used to do precise, accurate accounting. |
| 2790 | */ |
| 2791 | if (huge_page_disallowed) |
| 2792 | return PG_LEVEL_4K; |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 2793 | |
| 2794 | /* |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 2795 | * mmu_notifier_retry() was successful and mmu_lock is held, so |
| 2796 | * the pmd can't be split from under us. |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 2797 | */ |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 2798 | mask = KVM_PAGES_PER_HPAGE(level) - 1; |
| 2799 | VM_BUG_ON((gfn & mask) != (pfn & mask)); |
| 2800 | *pfnp = pfn & ~mask; |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 2801 | |
| 2802 | return level; |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 2803 | } |
| 2804 | |
Ben Gardon | bb18842 | 2020-10-14 11:26:50 -0700 | [diff] [blame] | 2805 | void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level, |
| 2806 | kvm_pfn_t *pfnp, int *goal_levelp) |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 2807 | { |
Ben Gardon | bb18842 | 2020-10-14 11:26:50 -0700 | [diff] [blame] | 2808 | int level = *goal_levelp; |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 2809 | |
Ben Gardon | 7d94531 | 2020-10-14 11:26:49 -0700 | [diff] [blame] | 2810 | if (cur_level == level && level > PG_LEVEL_4K && |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 2811 | is_shadow_present_pte(spte) && |
| 2812 | !is_large_pte(spte)) { |
| 2813 | /* |
| 2814 | * A small SPTE exists for this pfn, but FNAME(fetch) |
| 2815 | * and __direct_map would like to create a large PTE |
| 2816 | * instead: just force them to go down another level, |
| 2817 |  * patching the next 9 bits of the address back |
| 2818 |  * into pfn. |
| 2819 | */ |
Ben Gardon | 7d94531 | 2020-10-14 11:26:49 -0700 | [diff] [blame] | 2820 | u64 page_mask = KVM_PAGES_PER_HPAGE(level) - |
| 2821 | KVM_PAGES_PER_HPAGE(level - 1); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 2822 | *pfnp |= gfn & page_mask; |
Ben Gardon | bb18842 | 2020-10-14 11:26:50 -0700 | [diff] [blame] | 2823 | (*goal_levelp)--; |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 2824 | } |
| 2825 | } |
| 2826 | |
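/*
 * Editorial note, worked arithmetic for the page_mask above, assuming
 * 4K base pages and cur_level == level == PG_LEVEL_2M:
 *
 *	page_mask = KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) -
 *		    KVM_PAGES_PER_HPAGE(PG_LEVEL_4K)
 *		  = 512 - 1 = 0x1ff
 *	*pfnp |= gfn & 0x1ff;
 *
 * i.e. the low 9 bits of the gfn are folded back into the pfn (which
 * kvm_mmu_hugepage_adjust() had masked off) so that the mapping created
 * one level down still points at the correct 4K page inside the
 * disallowed 2M region.
 */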
Sean Christopherson | 6c2fd34 | 2020-09-23 11:37:30 -0700 | [diff] [blame] | 2827 | static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 2828 | int map_writable, int max_level, kvm_pfn_t pfn, |
Sean Christopherson | 6c2fd34 | 2020-09-23 11:37:30 -0700 | [diff] [blame] | 2829 | bool prefault, bool is_tdp) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2830 | { |
Sean Christopherson | 6c2fd34 | 2020-09-23 11:37:30 -0700 | [diff] [blame] | 2831 | bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(); |
| 2832 | bool write = error_code & PFERR_WRITE_MASK; |
| 2833 | bool exec = error_code & PFERR_FETCH_MASK; |
| 2834 | bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled; |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 2835 | struct kvm_shadow_walk_iterator it; |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 2836 | struct kvm_mmu_page *sp; |
Sean Christopherson | 3cf0661 | 2020-09-23 11:37:31 -0700 | [diff] [blame] | 2837 | int level, req_level, ret; |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 2838 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 2839 | gfn_t base_gfn = gfn; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2840 | |
Sean Christopherson | 3cf0661 | 2020-09-23 11:37:31 -0700 | [diff] [blame] | 2841 | level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn, |
| 2842 | huge_page_disallowed, &req_level); |
Sean Christopherson | 4cd071d | 2019-12-06 15:57:26 -0800 | [diff] [blame] | 2843 | |
Paolo Bonzini | 335e192 | 2019-07-01 06:22:57 -0400 | [diff] [blame] | 2844 | trace_kvm_mmu_spte_requested(gpa, level, pfn); |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 2845 | for_each_shadow_entry(vcpu, gpa, it) { |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 2846 | /* |
| 2847 | * We cannot overwrite existing page tables with an NX |
| 2848 | * large page, as the leaf could be executable. |
| 2849 | */ |
Sean Christopherson | dcc7065 | 2020-09-23 11:37:34 -0700 | [diff] [blame] | 2850 | if (nx_huge_page_workaround_enabled) |
Ben Gardon | 7d94531 | 2020-10-14 11:26:49 -0700 | [diff] [blame] | 2851 | disallowed_hugepage_adjust(*it.sptep, gfn, it.level, |
| 2852 | &pfn, &level); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 2853 | |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 2854 | base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); |
| 2855 | if (it.level == level) |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 2856 | break; |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 2857 | |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 2858 | drop_large_spte(vcpu, it.sptep); |
| 2859 | if (!is_shadow_present_pte(*it.sptep)) { |
| 2860 | sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr, |
| 2861 | it.level - 1, true, ACC_ALL); |
Lai Jiangshan | c9fa0b3 | 2010-05-26 16:48:25 +0800 | [diff] [blame] | 2862 | |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 2863 | link_shadow_page(vcpu, it.sptep, sp); |
Sean Christopherson | 5bcaf3e | 2020-09-23 11:37:32 -0700 | [diff] [blame] | 2864 | if (is_tdp && huge_page_disallowed && |
| 2865 | req_level >= it.level) |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 2866 | account_huge_nx_page(vcpu->kvm, sp); |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 2867 | } |
| 2868 | } |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 2869 | |
| 2870 | ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, |
| 2871 | write, level, base_gfn, pfn, prefault, |
| 2872 | map_writable); |
Sean Christopherson | 1270375 | 2020-09-23 15:04:25 -0700 | [diff] [blame] | 2873 | if (ret == RET_PF_SPURIOUS) |
| 2874 | return ret; |
| 2875 | |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 2876 | direct_pte_prefetch(vcpu, it.sptep); |
| 2877 | ++vcpu->stat.pf_fixed; |
| 2878 | return ret; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2879 | } |
| 2880 | |
Huang Ying | 77db5cb | 2010-10-08 16:24:15 +0800 | [diff] [blame] | 2881 | static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2882 | { |
Eric W. Biederman | 585a8b9 | 2018-04-16 14:23:27 -0500 | [diff] [blame] | 2883 | send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk); |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2884 | } |
| 2885 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2886 | static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2887 | { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2888 | /* |
| 2889 |  * Do not cache the MMIO info caused by writing a read-only gfn |
| 2890 |  * into the spte; otherwise a read access on the read-only gfn can |
| 2891 |  * also cause an MMIO page fault and be treated as an MMIO access. |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2892 | */ |
| 2893 | if (pfn == KVM_PFN_ERR_RO_FAULT) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 2894 | return RET_PF_EMULATE; |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 2895 | |
Xiao Guangrong | e6c1502 | 2012-08-03 15:38:36 +0800 | [diff] [blame] | 2896 | if (pfn == KVM_PFN_ERR_HWPOISON) { |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 2897 | kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 2898 | return RET_PF_RETRY; |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 2899 | } |
Gleb Natapov | edba23e | 2010-07-07 20:16:45 +0300 | [diff] [blame] | 2900 | |
Sean Christopherson | 2c151b2 | 2018-03-29 14:48:30 -0700 | [diff] [blame] | 2901 | return -EFAULT; |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2902 | } |
| 2903 | |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 2904 | static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 2905 | kvm_pfn_t pfn, unsigned int access, |
| 2906 | int *ret_val) |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 2907 | { |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 2908 | /* The pfn is invalid, report the error! */ |
Xiao Guangrong | 81c52c5 | 2012-10-16 20:10:59 +0800 | [diff] [blame] | 2909 | if (unlikely(is_error_pfn(pfn))) { |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 2910 | *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); |
Paolo Bonzini | 798e88b | 2016-02-23 15:28:51 +0100 | [diff] [blame] | 2911 | return true; |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 2912 | } |
| 2913 | |
Sean Christopherson | 30ab590 | 2021-02-25 12:47:31 -0800 | [diff] [blame] | 2914 | if (unlikely(is_noslot_pfn(pfn))) { |
Sean Christopherson | 4af7715 | 2019-08-01 13:35:22 -0700 | [diff] [blame] | 2915 | vcpu_cache_mmio_info(vcpu, gva, gfn, |
| 2916 | access & shadow_mmio_access_mask); |
Sean Christopherson | 30ab590 | 2021-02-25 12:47:31 -0800 | [diff] [blame] | 2917 | /* |
| 2918 | * If MMIO caching is disabled, emulate immediately without |
| 2919 | * touching the shadow page tables as attempting to install an |
| 2920 | * MMIO SPTE will just be an expensive nop. |
| 2921 | */ |
| 2922 | if (unlikely(!shadow_mmio_value)) { |
| 2923 | *ret_val = RET_PF_EMULATE; |
| 2924 | return true; |
| 2925 | } |
| 2926 | } |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 2927 | |
Paolo Bonzini | 798e88b | 2016-02-23 15:28:51 +0100 | [diff] [blame] | 2928 | return false; |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 2929 | } |
| 2930 | |
Xiao Guangrong | e5552fd | 2013-07-30 21:01:59 +0800 | [diff] [blame] | 2931 | static bool page_fault_can_be_fast(u32 error_code) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 2932 | { |
| 2933 | /* |
Xiao Guangrong | 1c118b8 | 2013-07-18 12:52:37 +0800 | [diff] [blame] | 2934 | 	 * Do not fix an MMIO spte with an invalid generation number, which |
| 2935 | 	 * needs to be updated by the slow page fault path. |
| 2936 | */ |
| 2937 | if (unlikely(error_code & PFERR_RSVD_MASK)) |
| 2938 | return false; |
| 2939 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 2940 | /* See if the page fault is due to an NX violation */ |
| 2941 | if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)) |
| 2942 | == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)))) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 2943 | return false; |
| 2944 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 2945 | /* |
| 2946 | * #PF can be fast if: |
| 2947 |  * 1. The shadow page table entry is not present, which could mean that |
| 2948 |  * the fault is caused by access tracking (if enabled). |
| 2949 |  * 2. The shadow page table entry is present and the fault |
| 2950 |  * is caused by write-protection; in that case we just need to change |
| 2951 |  * the W bit of the spte, which can be done without the mmu-lock. |
| 2952 | * |
| 2953 | * However, if access tracking is disabled we know that a non-present |
| 2954 | * page must be a genuine page fault where we have to create a new SPTE. |
| 2955 | * So, if access tracking is disabled, we return true only for write |
| 2956 | * accesses to a present page. |
| 2957 | */ |
| 2958 | |
| 2959 | return shadow_acc_track_mask != 0 || |
| 2960 | ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)) |
| 2961 | == (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 2962 | } |
| 2963 | |
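/*
 * Editorial note, concrete cases for page_fault_can_be_fast() above
 * (PFERR_* are the standard x86 #PF error-code bits):
 *
 *	PFERR_PRESENT_MASK | PFERR_WRITE_MASK
 *		write to a write-protected present page	-> fast
 *	PFERR_PRESENT_MASK | PFERR_FETCH_MASK
 *		NX violation on a present page		-> not fast
 *	0 (read of a not-present page), access tracking enabled	-> fast
 *	0 (read of a not-present page), access tracking disabled	-> not fast
 *	PFERR_RSVD_MASK set (MMIO/reserved-bit fault)	-> not fast
 */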
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 2964 | /* |
| 2965 | * Returns true if the SPTE was fixed successfully. Otherwise, |
| 2966 | * someone else modified the SPTE from its original value. |
| 2967 | */ |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 2968 | static bool |
Xiao Guangrong | 92a476c | 2014-04-17 17:06:13 +0800 | [diff] [blame] | 2969 | fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 2970 | u64 *sptep, u64 old_spte, u64 new_spte) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 2971 | { |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 2972 | gfn_t gfn; |
| 2973 | |
| 2974 | WARN_ON(!sp->role.direct); |
| 2975 | |
Kai Huang | 9b51a63 | 2015-01-28 10:54:25 +0800 | [diff] [blame] | 2976 | /* |
| 2977 | * Theoretically we could also set dirty bit (and flush TLB) here in |
| 2978 | * order to eliminate unnecessary PML logging. See comments in |
| 2979 | * set_spte. But fast_page_fault is very unlikely to happen with PML |
| 2980 |  * enabled, so we do not do this. This might result in the same GPA |
| 2981 |  * being logged in the PML buffer again when the write really happens, |
| 2982 |  * and eventually in mark_page_dirty being called twice. But that does |
| 2983 |  * no harm. This also avoids the TLB flush needed after setting the dirty bit, |
| 2984 | * so non-PML cases won't be impacted. |
| 2985 | * |
| 2986 | * Compare with set_spte where instead shadow_dirty_mask is set. |
| 2987 | */ |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 2988 | if (cmpxchg64(sptep, old_spte, new_spte) != old_spte) |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 2989 | return false; |
| 2990 | |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 2991 | if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) { |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 2992 | /* |
| 2993 |  * The gfn of a direct spte is stable since it is |
| 2994 |  * calculated from sp->gfn. |
| 2995 | */ |
| 2996 | gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); |
| 2997 | kvm_vcpu_mark_page_dirty(vcpu, gfn); |
| 2998 | } |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 2999 | |
| 3000 | return true; |
| 3001 | } |
| 3002 | |
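/*
 * Editorial sketch of the lockless update in fast_pf_fix_direct_spte()
 * above: the SPTE is replaced only if nobody changed it since it was read
 * during the lockless walk, roughly:
 *
 *	old_spte = READ_ONCE(*sptep);	// read under the lockless walk
 *	new_spte = ...;			// e.g. old_spte | PT_WRITABLE_MASK
 *	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
 *		return false;		// lost the race, caller retries
 *
 * which is why fast_page_fault() below runs in a bounded retry loop
 * instead of assuming the first attempt succeeds.
 */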
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3003 | static bool is_access_allowed(u32 fault_err_code, u64 spte) |
| 3004 | { |
| 3005 | if (fault_err_code & PFERR_FETCH_MASK) |
| 3006 | return is_executable_pte(spte); |
| 3007 | |
| 3008 | if (fault_err_code & PFERR_WRITE_MASK) |
| 3009 | return is_writable_pte(spte); |
| 3010 | |
| 3011 | /* Fault was on Read access */ |
| 3012 | return spte & PT_PRESENT_MASK; |
| 3013 | } |
| 3014 | |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3015 | /* |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3016 | * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS. |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3017 | */ |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3018 | static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, |
| 3019 | u32 error_code) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3020 | { |
| 3021 | struct kvm_shadow_walk_iterator iterator; |
Xiao Guangrong | 92a476c | 2014-04-17 17:06:13 +0800 | [diff] [blame] | 3022 | struct kvm_mmu_page *sp; |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3023 | int ret = RET_PF_INVALID; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3024 | u64 spte = 0ull; |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3025 | uint retry_count = 0; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3026 | |
Xiao Guangrong | e5552fd | 2013-07-30 21:01:59 +0800 | [diff] [blame] | 3027 | if (!page_fault_can_be_fast(error_code)) |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3028 | return ret; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3029 | |
| 3030 | walk_shadow_page_lockless_begin(vcpu); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3031 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3032 | do { |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3033 | u64 new_spte; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3034 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3035 | for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte) |
Sean Christopherson | f9fa250 | 2020-01-08 12:24:42 -0800 | [diff] [blame] | 3036 | if (!is_shadow_present_pte(spte)) |
Junaid Shahid | d162f30 | 2016-12-21 20:29:30 -0800 | [diff] [blame] | 3037 | break; |
| 3038 | |
Sean Christopherson | ec89e64 | 2021-02-25 12:47:28 -0800 | [diff] [blame] | 3039 | if (!is_shadow_present_pte(spte)) |
| 3040 | break; |
| 3041 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 3042 | sp = sptep_to_sp(iterator.sptep); |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3043 | if (!is_last_spte(spte, sp->role.level)) |
| 3044 | break; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3045 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3046 | /* |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3047 | * Check whether the memory access that caused the fault would |
| 3048 | * still cause it if it were to be performed right now. If not, |
| 3049 | * then this is a spurious fault caused by TLB lazily flushed, |
| 3050 |  * then this is a spurious fault caused by a lazily flushed TLB, |
| 3051 | * current CPU took the fault. |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3052 | * |
| 3053 | * Need not check the access of upper level table entries since |
| 3054 | * they are always ACC_ALL. |
| 3055 | */ |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3056 | if (is_access_allowed(error_code, spte)) { |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3057 | ret = RET_PF_SPURIOUS; |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3058 | break; |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3059 | } |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3060 | |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3061 | new_spte = spte; |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3062 | |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3063 | if (is_access_track_spte(spte)) |
| 3064 | new_spte = restore_acc_track_spte(new_spte); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3065 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3066 | /* |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3067 | * Currently, to simplify the code, write-protection can |
| 3068 | * be removed in the fast path only if the SPTE was |
| 3069 | * write-protected for dirty-logging or access tracking. |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3070 | */ |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3071 | if ((error_code & PFERR_WRITE_MASK) && |
Miaohe Lin | e630269 | 2020-02-15 10:44:22 +0800 | [diff] [blame] | 3072 | spte_can_locklessly_be_made_writable(spte)) { |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3073 | new_spte |= PT_WRITABLE_MASK; |
| 3074 | |
| 3075 | /* |
| 3076 |  * Do not fix write permission on a large spte. Since |
| 3077 |  * we only mark the first page dirty in the dirty bitmap in |
| 3078 |  * fast_pf_fix_direct_spte(), the other pages would be missed |
| 3079 |  * if the slot has dirty logging enabled. |
| 3080 | * |
| 3081 | * Instead, we let the slow page fault path create a |
| 3082 | * normal spte to fix the access. |
| 3083 | * |
| 3084 | * See the comments in kvm_arch_commit_memory_region(). |
| 3085 | */ |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3086 | if (sp->role.level > PG_LEVEL_4K) |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3087 | break; |
| 3088 | } |
| 3089 | |
| 3090 | /* Verify that the fault can be handled in the fast path */ |
| 3091 | if (new_spte == spte || |
| 3092 | !is_access_allowed(error_code, new_spte)) |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3093 | break; |
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 3094 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3095 | /* |
| 3096 | * Currently, fast page fault only works for direct mapping |
| 3097 |  * since the gfn is not stable for indirect shadow pages. See |
Mauro Carvalho Chehab | 3ecad8c | 2020-04-14 18:48:36 +0200 | [diff] [blame] | 3098 | 	 * Documentation/virt/kvm/locking.rst for more details. |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3099 | */ |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3100 | if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte, |
| 3101 | new_spte)) { |
| 3102 | ret = RET_PF_FIXED; |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3103 | break; |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3104 | } |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3105 | |
| 3106 | if (++retry_count > 4) { |
| 3107 | printk_once(KERN_WARNING |
| 3108 | "kvm: Fast #PF retrying more than 4 times.\n"); |
| 3109 | break; |
| 3110 | } |
| 3111 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3112 | } while (true); |
| 3113 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3114 | trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep, |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3115 | spte, ret); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3116 | walk_shadow_page_lockless_end(vcpu); |
| 3117 | |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3118 | return ret; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3119 | } |
| 3120 | |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3121 | static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, |
| 3122 | struct list_head *invalid_list) |
| 3123 | { |
| 3124 | struct kvm_mmu_page *sp; |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 3125 | |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3126 | if (!VALID_PAGE(*root_hpa)) |
| 3127 | return; |
| 3128 | |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 3129 | sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK); |
Ben Gardon | 02c00b3 | 2020-10-14 20:26:44 +0200 | [diff] [blame] | 3130 | |
Ben Gardon | 2bdb3d8 | 2021-04-01 16:37:27 -0700 | [diff] [blame] | 3131 | if (is_tdp_mmu_page(sp)) |
Ben Gardon | 6103bc0 | 2021-04-01 16:37:32 -0700 | [diff] [blame] | 3132 | kvm_tdp_mmu_put_root(kvm, sp, false); |
Ben Gardon | 76eb54e | 2021-04-01 16:37:25 -0700 | [diff] [blame] | 3133 | else if (!--sp->root_count && sp->role.invalid) |
| 3134 | kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3135 | |
| 3136 | *root_hpa = INVALID_PAGE; |
| 3137 | } |
| 3138 | |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3139 | /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */ |
Vitaly Kuznetsov | 6a82cd1 | 2018-10-08 21:28:07 +0200 | [diff] [blame] | 3140 | void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
| 3141 | ulong roots_to_free) |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3142 | { |
Sean Christopherson | 4d710de | 2020-09-23 12:12:04 -0700 | [diff] [blame] | 3143 | struct kvm *kvm = vcpu->kvm; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3144 | int i; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3145 | LIST_HEAD(invalid_list); |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3146 | bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3147 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3148 | BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3149 | |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3150 | /* Before acquiring the MMU lock, see if we need to do any real work. */ |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3151 | if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) { |
| 3152 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 3153 | if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) && |
| 3154 | VALID_PAGE(mmu->prev_roots[i].hpa)) |
| 3155 | break; |
| 3156 | |
| 3157 | if (i == KVM_MMU_NUM_PREV_ROOTS) |
| 3158 | return; |
| 3159 | } |
Gleb Natapov | 35af577 | 2013-05-16 11:55:51 +0300 | [diff] [blame] | 3160 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 3161 | write_lock(&kvm->mmu_lock); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3162 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3163 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 3164 | if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) |
Sean Christopherson | 4d710de | 2020-09-23 12:12:04 -0700 | [diff] [blame] | 3165 | mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa, |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3166 | &invalid_list); |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 3167 | |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3168 | if (free_active_root) { |
| 3169 | if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && |
| 3170 | (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { |
Sean Christopherson | 4d710de | 2020-09-23 12:12:04 -0700 | [diff] [blame] | 3171 | mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list); |
Sean Christopherson | 04d4555 | 2021-03-04 17:10:46 -0800 | [diff] [blame] | 3172 | } else if (mmu->pae_root) { |
Sean Christopherson | c834e5e4 | 2021-03-09 14:42:06 -0800 | [diff] [blame] | 3173 | for (i = 0; i < 4; ++i) { |
| 3174 | if (!IS_VALID_PAE_ROOT(mmu->pae_root[i])) |
| 3175 | continue; |
| 3176 | |
| 3177 | mmu_free_root_page(kvm, &mmu->pae_root[i], |
| 3178 | &invalid_list); |
| 3179 | mmu->pae_root[i] = INVALID_PAE_ROOT; |
| 3180 | } |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3181 | } |
Sean Christopherson | 04d4555 | 2021-03-04 17:10:46 -0800 | [diff] [blame] | 3182 | mmu->root_hpa = INVALID_PAGE; |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3183 | mmu->root_pgd = 0; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3184 | } |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3185 | |
Sean Christopherson | 4d710de | 2020-09-23 12:12:04 -0700 | [diff] [blame] | 3186 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 3187 | write_unlock(&kvm->mmu_lock); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3188 | } |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3189 | EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3190 | |
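/*
 * Editorial usage sketch for kvm_mmu_free_roots() above: callers pass any
 * combination of the KVM_MMU_ROOT_* flags, e.g. (hypothetical call, for
 * illustration only):
 *
 *	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu,
 *			   KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(0));
 *
 * frees the active root and the first cached previous root, while
 * KVM_MMU_ROOTS_ALL frees the active root plus every cached one.
 */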
Sean Christopherson | 25b62c6 | 2021-06-09 16:42:29 -0700 | [diff] [blame] | 3191 | void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) |
| 3192 | { |
| 3193 | unsigned long roots_to_free = 0; |
| 3194 | hpa_t root_hpa; |
| 3195 | int i; |
| 3196 | |
| 3197 | /* |
| 3198 |  * This should not be called while L2 is active; L2 can't invalidate |
| 3199 | * _only_ its own roots, e.g. INVVPID unconditionally exits. |
| 3200 | */ |
| 3201 | WARN_ON_ONCE(mmu->mmu_role.base.guest_mode); |
| 3202 | |
| 3203 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { |
| 3204 | root_hpa = mmu->prev_roots[i].hpa; |
| 3205 | if (!VALID_PAGE(root_hpa)) |
| 3206 | continue; |
| 3207 | |
| 3208 | if (!to_shadow_page(root_hpa) || |
| 3209 | to_shadow_page(root_hpa)->role.guest_mode) |
| 3210 | roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); |
| 3211 | } |
| 3212 | |
| 3213 | kvm_mmu_free_roots(vcpu, mmu, roots_to_free); |
| 3214 | } |
| 3215 | EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots); |
| 3216 | |
| 3217 | |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3218 | static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) |
| 3219 | { |
| 3220 | int ret = 0; |
| 3221 | |
Vitaly Kuznetsov | 995decb | 2020-07-08 16:00:23 +0200 | [diff] [blame] | 3222 | if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) { |
Avi Kivity | a8eeb04 | 2010-05-10 12:34:53 +0300 | [diff] [blame] | 3223 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3224 | ret = 1; |
| 3225 | } |
| 3226 | |
| 3227 | return ret; |
| 3228 | } |
| 3229 | |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3230 | static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva, |
| 3231 | u8 level, bool direct) |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3232 | { |
| 3233 | struct kvm_mmu_page *sp; |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3234 | |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3235 | sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL); |
| 3236 | ++sp->root_count; |
| 3237 | |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3238 | return __pa(sp->spt); |
| 3239 | } |
| 3240 | |
| 3241 | static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) |
| 3242 | { |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3243 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
| 3244 | u8 shadow_root_level = mmu->shadow_root_level; |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3245 | hpa_t root; |
Avi Kivity | 7ebaf15 | 2010-10-03 18:51:39 +0200 | [diff] [blame] | 3246 | unsigned i; |
Paolo Bonzini | 4a38162 | 2021-04-08 08:10:25 -0400 | [diff] [blame] | 3247 | int r; |
| 3248 | |
| 3249 | write_lock(&vcpu->kvm->mmu_lock); |
| 3250 | r = make_mmu_pages_available(vcpu); |
| 3251 | if (r < 0) |
| 3252 | goto out_unlock; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3253 | |
Paolo Bonzini | 897218f | 2021-02-06 09:53:33 -0500 | [diff] [blame] | 3254 | if (is_tdp_mmu_enabled(vcpu->kvm)) { |
Ben Gardon | 02c00b3 | 2020-10-14 20:26:44 +0200 | [diff] [blame] | 3255 | root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu); |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3256 | mmu->root_hpa = root; |
Ben Gardon | 02c00b3 | 2020-10-14 20:26:44 +0200 | [diff] [blame] | 3257 | } else if (shadow_root_level >= PT64_ROOT_4LEVEL) { |
Sean Christopherson | 6e6ec58 | 2021-03-04 17:10:50 -0800 | [diff] [blame] | 3258 | root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true); |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3259 | mmu->root_hpa = root; |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3260 | } else if (shadow_root_level == PT32E_ROOT_LEVEL) { |
Paolo Bonzini | 4a38162 | 2021-04-08 08:10:25 -0400 | [diff] [blame] | 3261 | if (WARN_ON_ONCE(!mmu->pae_root)) { |
| 3262 | r = -EIO; |
| 3263 | goto out_unlock; |
| 3264 | } |
Sean Christopherson | 73ad160 | 2021-03-04 17:11:01 -0800 | [diff] [blame] | 3265 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3266 | for (i = 0; i < 4; ++i) { |
Sean Christopherson | c834e5e4 | 2021-03-09 14:42:06 -0800 | [diff] [blame] | 3267 | WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3268 | |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3269 | root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), |
| 3270 | i << 30, PT32_ROOT_LEVEL, true); |
Sean Christopherson | 17e368d | 2021-03-04 17:10:54 -0800 | [diff] [blame] | 3271 | mmu->pae_root[i] = root | PT_PRESENT_MASK | |
| 3272 | shadow_me_mask; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3273 | } |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3274 | mmu->root_hpa = __pa(mmu->pae_root); |
Sean Christopherson | 73ad160 | 2021-03-04 17:11:01 -0800 | [diff] [blame] | 3275 | } else { |
| 3276 | WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level); |
Paolo Bonzini | 4a38162 | 2021-04-08 08:10:25 -0400 | [diff] [blame] | 3277 | r = -EIO; |
| 3278 | goto out_unlock; |
Sean Christopherson | 73ad160 | 2021-03-04 17:11:01 -0800 | [diff] [blame] | 3279 | } |
Sean Christopherson | 3651c7f | 2020-02-28 14:52:39 -0800 | [diff] [blame] | 3280 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3281 | /* root_pgd is ignored for direct MMUs. */ |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3282 | mmu->root_pgd = 0; |
Paolo Bonzini | 4a38162 | 2021-04-08 08:10:25 -0400 | [diff] [blame] | 3283 | out_unlock: |
| 3284 | write_unlock(&vcpu->kvm->mmu_lock); |
| 3285 | return r; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3286 | } |
| 3287 | |
| 3288 | static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3289 | { |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3290 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
Sean Christopherson | 6e0918a | 2021-03-04 17:10:51 -0800 | [diff] [blame] | 3291 | u64 pdptrs[4], pm_mask; |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3292 | gfn_t root_gfn, root_pgd; |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3293 | hpa_t root; |
Paolo Bonzini | 4a38162 | 2021-04-08 08:10:25 -0400 | [diff] [blame] | 3294 | unsigned i; |
| 3295 | int r; |
Avi Kivity | 3bb65a2 | 2007-01-05 16:36:51 -0800 | [diff] [blame] | 3296 | |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3297 | root_pgd = mmu->get_guest_pgd(vcpu); |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3298 | root_gfn = root_pgd >> PAGE_SHIFT; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3299 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3300 | if (mmu_check_root(vcpu, root_gfn)) |
| 3301 | return 1; |
| 3302 | |
| 3303 | /* |
Paolo Bonzini | 4a38162 | 2021-04-08 08:10:25 -0400 | [diff] [blame] | 3304 | * On SVM, reading PDPTRs might access guest memory, which might fault |
| 3305 | * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock. |
| 3306 | */ |
Sean Christopherson | 6e0918a | 2021-03-04 17:10:51 -0800 | [diff] [blame] | 3307 | if (mmu->root_level == PT32E_ROOT_LEVEL) { |
| 3308 | for (i = 0; i < 4; ++i) { |
| 3309 | pdptrs[i] = mmu->get_pdptr(vcpu, i); |
| 3310 | if (!(pdptrs[i] & PT_PRESENT_MASK)) |
| 3311 | continue; |
| 3312 | |
| 3313 | if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT)) |
| 3314 | return 1; |
| 3315 | } |
| 3316 | } |
| 3317 | |
Ben Gardon | d501f74 | 2021-05-18 10:34:14 -0700 | [diff] [blame] | 3318 | r = alloc_all_memslots_rmaps(vcpu->kvm); |
| 3319 | if (r) |
| 3320 | return r; |
| 3321 | |
Paolo Bonzini | 4a38162 | 2021-04-08 08:10:25 -0400 | [diff] [blame] | 3322 | write_lock(&vcpu->kvm->mmu_lock); |
| 3323 | r = make_mmu_pages_available(vcpu); |
| 3324 | if (r < 0) |
| 3325 | goto out_unlock; |
| 3326 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3327 | /* |
| 3328 | * Do we shadow a long mode page table? If so we need to |
| 3329 |  * write-protect the guest's page table root. |
| 3330 | */ |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3331 | if (mmu->root_level >= PT64_ROOT_4LEVEL) { |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3332 | root = mmu_alloc_root(vcpu, root_gfn, 0, |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3333 | mmu->shadow_root_level, false); |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3334 | mmu->root_hpa = root; |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3335 | goto set_root_pgd; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3336 | } |
Joerg Roedel | f87f928 | 2010-09-02 17:29:45 +0200 | [diff] [blame] | 3337 | |
Paolo Bonzini | 4a38162 | 2021-04-08 08:10:25 -0400 | [diff] [blame] | 3338 | if (WARN_ON_ONCE(!mmu->pae_root)) { |
| 3339 | r = -EIO; |
| 3340 | goto out_unlock; |
| 3341 | } |
Sean Christopherson | 73ad160 | 2021-03-04 17:11:01 -0800 | [diff] [blame] | 3342 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3343 | /* |
| 3344 | * We shadow a 32 bit page table. This may be a legacy 2-level |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3345 | * or a PAE 3-level page table. In either case we need to be aware that |
| 3346 | * the shadow page table may be a PAE or a long mode page table. |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3347 | */ |
Sean Christopherson | 17e368d | 2021-03-04 17:10:54 -0800 | [diff] [blame] | 3348 | pm_mask = PT_PRESENT_MASK | shadow_me_mask; |
Sean Christopherson | 748e52b | 2021-03-04 17:10:49 -0800 | [diff] [blame] | 3349 | if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) { |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3350 | pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; |
| 3351 | |
Sean Christopherson | 03ca458 | 2021-05-05 13:42:21 -0700 | [diff] [blame] | 3352 | if (WARN_ON_ONCE(!mmu->pml4_root)) { |
Paolo Bonzini | 4a38162 | 2021-04-08 08:10:25 -0400 | [diff] [blame] | 3353 | r = -EIO; |
| 3354 | goto out_unlock; |
| 3355 | } |
Sean Christopherson | 73ad160 | 2021-03-04 17:11:01 -0800 | [diff] [blame] | 3356 | |
Sean Christopherson | 03ca458 | 2021-05-05 13:42:21 -0700 | [diff] [blame] | 3357 | mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask; |
Sean Christopherson | 04d4555 | 2021-03-04 17:10:46 -0800 | [diff] [blame] | 3358 | } |
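/*
 * The loop below fills the four PAE PDPTEs. When a 64-bit shadow wraps a
 * 32-bit/PAE guest (the case handled just above), those entries are reached
 * through the single PML4 entry that now points at pae_root, so the root
 * installed further down is the pml4 page; otherwise pae_root itself becomes
 * the root.
 */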
| 3359 | |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3360 | for (i = 0; i < 4; ++i) { |
Sean Christopherson | c834e5e4 | 2021-03-09 14:42:06 -0800 | [diff] [blame] | 3361 | WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i])); |
Sean Christopherson | 6e6ec58 | 2021-03-04 17:10:50 -0800 | [diff] [blame] | 3362 | |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3363 | if (mmu->root_level == PT32E_ROOT_LEVEL) { |
Sean Christopherson | 6e0918a | 2021-03-04 17:10:51 -0800 | [diff] [blame] | 3364 | if (!(pdptrs[i] & PT_PRESENT_MASK)) { |
Sean Christopherson | c834e5e4 | 2021-03-09 14:42:06 -0800 | [diff] [blame] | 3365 | mmu->pae_root[i] = INVALID_PAE_ROOT; |
Avi Kivity | 417726a | 2007-04-12 17:35:58 +0300 | [diff] [blame] | 3366 | continue; |
| 3367 | } |
Sean Christopherson | 6e0918a | 2021-03-04 17:10:51 -0800 | [diff] [blame] | 3368 | root_gfn = pdptrs[i] >> PAGE_SHIFT; |
Eric Northup | 5a7388c | 2010-04-26 17:00:05 -0700 | [diff] [blame] | 3369 | } |
Avi Kivity | 8facbbf | 2010-05-04 12:58:32 +0300 | [diff] [blame] | 3370 | |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3371 | root = mmu_alloc_root(vcpu, root_gfn, i << 30, |
| 3372 | PT32_ROOT_LEVEL, false); |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3373 | mmu->pae_root[i] = root | pm_mask; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3374 | } |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3375 | |
Sean Christopherson | ba0a194 | 2021-03-04 17:10:48 -0800 | [diff] [blame] | 3376 | if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) |
Sean Christopherson | 03ca458 | 2021-05-05 13:42:21 -0700 | [diff] [blame] | 3377 | mmu->root_hpa = __pa(mmu->pml4_root); |
Sean Christopherson | ba0a194 | 2021-03-04 17:10:48 -0800 | [diff] [blame] | 3378 | else |
| 3379 | mmu->root_hpa = __pa(mmu->pae_root); |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3380 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3381 | set_root_pgd: |
Sean Christopherson | b37233c | 2021-03-04 17:10:47 -0800 | [diff] [blame] | 3382 | mmu->root_pgd = root_pgd; |
Paolo Bonzini | 4a38162 | 2021-04-08 08:10:25 -0400 | [diff] [blame] | 3383 | out_unlock: |
| 3384 | write_unlock(&vcpu->kvm->mmu_lock); |
Vitaly Kuznetsov | ad7dc69 | 2019-02-22 17:45:01 +0100 | [diff] [blame] | 3385 | |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3386 | return 0; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3387 | } |
| 3388 | |
Sean Christopherson | 748e52b | 2021-03-04 17:10:49 -0800 | [diff] [blame] | 3389 | static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu) |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3390 | { |
Sean Christopherson | 748e52b | 2021-03-04 17:10:49 -0800 | [diff] [blame] | 3391 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
Sean Christopherson | 03ca458 | 2021-05-05 13:42:21 -0700 | [diff] [blame] | 3392 | u64 *pml4_root, *pae_root; |
Sean Christopherson | 748e52b | 2021-03-04 17:10:49 -0800 | [diff] [blame] | 3393 | |
| 3394 | /* |
| 3395 | * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP |
| 3396 | * tables are allocated and initialized at root creation as there is no |
| 3397 | * equivalent level in the guest's NPT to shadow. Allocate the tables |
| 3398 | * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare. |
| 3399 | */ |
| 3400 | if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL || |
| 3401 | mmu->shadow_root_level < PT64_ROOT_4LEVEL) |
| 3402 | return 0; |
| 3403 | |
| 3404 | /* |
| 3405 | * This mess only works with 4-level paging and needs to be updated to |
| 3406 | * work with 5-level paging. |
| 3407 | */ |
| 3408 | if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL)) |
| 3409 | return -EIO; |
| 3410 | |
Sean Christopherson | 03ca458 | 2021-05-05 13:42:21 -0700 | [diff] [blame] | 3411 | if (mmu->pae_root && mmu->pml4_root) |
Sean Christopherson | 748e52b | 2021-03-04 17:10:49 -0800 | [diff] [blame] | 3412 | return 0; |
| 3413 | |
| 3414 | /* |
| 3415 | * The special roots should always be allocated in concert. Yell and |
| 3416 | * bail if KVM ends up in a state where only one of the roots is valid. |
| 3417 | */ |
Sean Christopherson | 03ca458 | 2021-05-05 13:42:21 -0700 | [diff] [blame] | 3418 | if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root)) |
Sean Christopherson | 748e52b | 2021-03-04 17:10:49 -0800 | [diff] [blame] | 3419 | return -EIO; |
| 3420 | |
Sean Christopherson | 4a98623 | 2021-03-09 14:42:07 -0800 | [diff] [blame] | 3421 | /* |
| 3422 | * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and |
| 3423 | * doesn't need to be decrypted. |
| 3424 | */ |
Sean Christopherson | 748e52b | 2021-03-04 17:10:49 -0800 | [diff] [blame] | 3425 | pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); |
| 3426 | if (!pae_root) |
| 3427 | return -ENOMEM; |
| 3428 | |
Sean Christopherson | 03ca458 | 2021-05-05 13:42:21 -0700 | [diff] [blame] | 3429 | pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); |
| 3430 | if (!pml4_root) { |
Sean Christopherson | 748e52b | 2021-03-04 17:10:49 -0800 | [diff] [blame] | 3431 | free_page((unsigned long)pae_root); |
| 3432 | return -ENOMEM; |
| 3433 | } |
| 3434 | |
| 3435 | mmu->pae_root = pae_root; |
Sean Christopherson | 03ca458 | 2021-05-05 13:42:21 -0700 | [diff] [blame] | 3436 | mmu->pml4_root = pml4_root; |
Sean Christopherson | 748e52b | 2021-03-04 17:10:49 -0800 | [diff] [blame] | 3437 | |
| 3438 | return 0; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3439 | } |
| 3440 | |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3441 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3442 | { |
| 3443 | int i; |
| 3444 | struct kvm_mmu_page *sp; |
| 3445 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3446 | if (vcpu->arch.mmu->direct_map) |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3447 | return; |
| 3448 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3449 | if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3450 | return; |
Xiao Guangrong | 6903074 | 2010-09-27 18:09:29 +0800 | [diff] [blame] | 3451 | |
David Matlack | 56f17dd | 2014-08-18 15:46:07 -0700 | [diff] [blame] | 3452 | vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3453 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3454 | if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { |
| 3455 | hpa_t root = vcpu->arch.mmu->root_hpa; |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 3456 | sp = to_shadow_page(root); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3457 | |
| 3458 | /* |
| 3459 | * Even if another CPU was marking the SP as unsync-ed |
| 3460 | * simultaneously, any guest page table changes are not |
| 3461 | * guaranteed to be visible anyway until this VCPU issues a TLB |
| 3462 | * flush strictly after those changes are made. We only need to |
| 3463 | * ensure that the other CPU sets these flags before any actual |
| 3464 | * changes to the page tables are made. The comments in |
| 3465 | * mmu_need_write_protect() describe what could go wrong if this |
| 3466 | * requirement isn't satisfied. |
| 3467 | */ |
| 3468 | if (!smp_load_acquire(&sp->unsync) && |
| 3469 | !smp_load_acquire(&sp->unsync_children)) |
| 3470 | return; |
| 3471 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 3472 | write_lock(&vcpu->kvm->mmu_lock); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3473 | kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); |
| 3474 | |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3475 | mmu_sync_children(vcpu, sp); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3476 | |
Xiao Guangrong | 0375f7f | 2011-11-28 20:41:00 +0800 | [diff] [blame] | 3477 | kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 3478 | write_unlock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3479 | return; |
| 3480 | } |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3481 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 3482 | write_lock(&vcpu->kvm->mmu_lock); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3483 | kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); |
| 3484 | |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3485 | for (i = 0; i < 4; ++i) { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3486 | hpa_t root = vcpu->arch.mmu->pae_root[i]; |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3487 | |
Sean Christopherson | c834e5e4 | 2021-03-09 14:42:06 -0800 | [diff] [blame] | 3488 | if (IS_VALID_PAE_ROOT(root)) { |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3489 | root &= PT64_BASE_ADDR_MASK; |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 3490 | sp = to_shadow_page(root); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3491 | mmu_sync_children(vcpu, sp); |
| 3492 | } |
| 3493 | } |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3494 | |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3495 | kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 3496 | write_unlock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3497 | } |
| 3498 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3499 | static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr, |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3500 | u32 access, struct x86_exception *exception) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3501 | { |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3502 | if (exception) |
| 3503 | exception->error_code = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3504 | return vaddr; |
| 3505 | } |
| 3506 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3507 | static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr, |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3508 | u32 access, |
| 3509 | struct x86_exception *exception) |
Joerg Roedel | 6539e73 | 2010-09-10 17:30:50 +0200 | [diff] [blame] | 3510 | { |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3511 | if (exception) |
| 3512 | exception->error_code = 0; |
Paolo Bonzini | 54987b7 | 2014-09-02 13:23:06 +0200 | [diff] [blame] | 3513 | return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); |
Joerg Roedel | 6539e73 | 2010-09-10 17:30:50 +0200 | [diff] [blame] | 3514 | } |
| 3515 | |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3516 | static bool |
| 3517 | __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) |
| 3518 | { |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3519 | int bit7 = (pte >> 7) & 1; |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3520 | |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3521 | return pte & rsvd_check->rsvd_bits_mask[bit7][level-1]; |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3522 | } |
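/*
 * Worked example: a 2MB-capable PDE (bit 7 set) checked at level 2 is tested
 * against rsvd_bits_mask[1][1], the "large page" mask built by
 * __reset_rsvds_bits_mask(); the same PDE without bit 7 uses
 * rsvd_bits_mask[0][1].
 */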
| 3523 | |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3524 | static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte) |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3525 | { |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3526 | return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f); |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3527 | } |
| 3528 | |
Takuya Yoshikawa | ded5874 | 2016-02-22 17:23:40 +0900 | [diff] [blame] | 3529 | static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3530 | { |
Paolo Bonzini | 9034e6e | 2017-08-17 18:36:58 +0200 | [diff] [blame] | 3531 | /* |
| 3532 | * A nested guest cannot use the MMIO cache if it is using nested |
| 3533 | * page tables, because cr2 is an nGPA while the cache stores GPAs. |
| 3534 | */ |
| 3535 | if (mmu_is_nested(vcpu)) |
| 3536 | return false; |
| 3537 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3538 | if (direct) |
| 3539 | return vcpu_match_mmio_gpa(vcpu, addr); |
| 3540 | |
| 3541 | return vcpu_match_mmio_gva(vcpu, addr); |
| 3542 | } |
| 3543 | |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3544 | /* |
| 3545 | * Return the level of the lowest level SPTE added to sptes. |
| 3546 | * That SPTE may be non-present. |
| 3547 | */ |
Sean Christopherson | 39b4d43 | 2020-12-17 16:31:37 -0800 | [diff] [blame] | 3548 | static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3549 | { |
| 3550 | struct kvm_shadow_walk_iterator iterator; |
Sean Christopherson | 2aa07893 | 2020-12-17 16:31:36 -0800 | [diff] [blame] | 3551 | int leaf = -1; |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3552 | u64 spte; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3553 | |
| 3554 | walk_shadow_page_lockless_begin(vcpu); |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3555 | |
Sean Christopherson | 39b4d43 | 2020-12-17 16:31:37 -0800 | [diff] [blame] | 3556 | for (shadow_walk_init(&iterator, vcpu, addr), |
| 3557 | *root_level = iterator.level; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3558 | shadow_walk_okay(&iterator); |
| 3559 | __shadow_walk_next(&iterator, spte)) { |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3560 | leaf = iterator.level; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3561 | spte = mmu_spte_get_lockless(iterator.sptep); |
| 3562 | |
Sean Christopherson | dde81f9 | 2020-12-17 16:31:38 -0800 | [diff] [blame] | 3563 | sptes[leaf] = spte; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3564 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3565 | if (!is_shadow_present_pte(spte)) |
| 3566 | break; |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3567 | } |
| 3568 | |
| 3569 | walk_shadow_page_lockless_end(vcpu); |
| 3570 | |
| 3571 | return leaf; |
| 3572 | } |
| 3573 | |
Sean Christopherson | 9aa4187 | 2020-12-17 16:31:39 -0800 | [diff] [blame] | 3574 | /* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */ |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3575 | static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) |
| 3576 | { |
Sean Christopherson | dde81f9 | 2020-12-17 16:31:38 -0800 | [diff] [blame] | 3577 | u64 sptes[PT64_ROOT_MAX_LEVEL + 1]; |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3578 | struct rsvd_bits_validate *rsvd_check; |
Sean Christopherson | 39b4d43 | 2020-12-17 16:31:37 -0800 | [diff] [blame] | 3579 | int root, leaf, level; |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3580 | bool reserved = false; |
| 3581 | |
David Matlack | 63c0cac | 2021-06-17 23:19:47 +0000 | [diff] [blame] | 3582 | if (is_tdp_mmu(vcpu->arch.mmu)) |
Sean Christopherson | 39b4d43 | 2020-12-17 16:31:37 -0800 | [diff] [blame] | 3583 | leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root); |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3584 | else |
Sean Christopherson | 39b4d43 | 2020-12-17 16:31:37 -0800 | [diff] [blame] | 3585 | leaf = get_walk(vcpu, addr, sptes, &root); |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3586 | |
Sean Christopherson | 2aa07893 | 2020-12-17 16:31:36 -0800 | [diff] [blame] | 3587 | if (unlikely(leaf < 0)) { |
| 3588 | *sptep = 0ull; |
| 3589 | return reserved; |
| 3590 | } |
| 3591 | |
Sean Christopherson | 9aa4187 | 2020-12-17 16:31:39 -0800 | [diff] [blame] | 3592 | *sptep = sptes[leaf]; |
| 3593 | |
| 3594 | /* |
| 3595 | * Skip reserved bits checks on the terminal leaf if it's not a valid |
| 3596 | * SPTE. Note, this also (intentionally) skips MMIO SPTEs, which, by |
| 3597 | * design, always have reserved bits set. The purpose of the checks is |
| 3598 | * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs. |
| 3599 | */ |
| 3600 | if (!is_shadow_present_pte(sptes[leaf])) |
| 3601 | leaf++; |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3602 | |
| 3603 | rsvd_check = &vcpu->arch.mmu->shadow_zero_check; |
| 3604 | |
Sean Christopherson | 9aa4187 | 2020-12-17 16:31:39 -0800 | [diff] [blame] | 3605 | for (level = root; level >= leaf; level--) |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3606 | /* |
| 3607 | * Use a bitwise-OR instead of a logical-OR to aggregate the |
| 3608 | * reserved bit and EPT's invalid memtype/XWR checks to avoid |
| 3609 | * adding a Jcc in the loop. |
| 3610 | */ |
Sean Christopherson | dde81f9 | 2020-12-17 16:31:38 -0800 | [diff] [blame] | 3611 | reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) | |
| 3612 | __is_rsvd_bits_set(rsvd_check, sptes[level], level); |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3613 | |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3614 | if (reserved) { |
Sean Christopherson | bb4cdf3 | 2021-02-25 12:47:49 -0800 | [diff] [blame] | 3615 | pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n", |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3616 | __func__, addr); |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3617 | for (level = root; level >= leaf; level--) |
Sean Christopherson | bb4cdf3 | 2021-02-25 12:47:49 -0800 | [diff] [blame] | 3618 | pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx", |
| 3619 | sptes[level], level, |
| 3620 | rsvd_check->rsvd_bits_mask[(sptes[level] >> 7) & 1][level-1]); |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3621 | } |
Sean Christopherson | ddce620 | 2019-12-06 15:57:27 -0800 | [diff] [blame] | 3622 | |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3623 | return reserved; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3624 | } |
| 3625 | |
Paolo Bonzini | e08d26f | 2017-08-17 18:36:56 +0200 | [diff] [blame] | 3626 | static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3627 | { |
| 3628 | u64 spte; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3629 | bool reserved; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3630 | |
Takuya Yoshikawa | ded5874 | 2016-02-22 17:23:40 +0900 | [diff] [blame] | 3631 | if (mmio_info_in_cache(vcpu, addr, direct)) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3632 | return RET_PF_EMULATE; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3633 | |
Ben Gardon | 95fb5b0 | 2020-10-14 11:26:58 -0700 | [diff] [blame] | 3634 | reserved = get_mmio_spte(vcpu, addr, &spte); |
Paolo Bonzini | 450869d | 2015-11-04 13:41:21 +0100 | [diff] [blame] | 3635 | if (WARN_ON(reserved)) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3636 | return -EINVAL; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3637 | |
| 3638 | if (is_mmio_spte(spte)) { |
| 3639 | gfn_t gfn = get_mmio_spte_gfn(spte); |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 3640 | unsigned int access = get_mmio_spte_access(spte); |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3641 | |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 3642 | if (!check_mmio_spte(vcpu, spte)) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3643 | return RET_PF_INVALID; |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 3644 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3645 | if (direct) |
| 3646 | addr = 0; |
Xiao Guangrong | 4f02264 | 2011-07-12 03:34:24 +0800 | [diff] [blame] | 3647 | |
| 3648 | trace_handle_mmio_page_fault(addr, gfn, access); |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3649 | vcpu_cache_mmio_info(vcpu, addr, gfn, access); |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3650 | return RET_PF_EMULATE; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3651 | } |
| 3652 | |
| 3653 | /* |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3654 | * If the page table is zapped by other CPUs, let the CPU fault again on |
| 3655 | * the address. |
| 3656 | */ |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3657 | return RET_PF_RETRY; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3658 | } |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3659 | |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 3660 | static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, |
| 3661 | u32 error_code, gfn_t gfn) |
| 3662 | { |
| 3663 | if (unlikely(error_code & PFERR_RSVD_MASK)) |
| 3664 | return false; |
| 3665 | |
| 3666 | if (!(error_code & PFERR_PRESENT_MASK) || |
| 3667 | !(error_code & PFERR_WRITE_MASK)) |
| 3668 | return false; |
| 3669 | |
| 3670 | /* |
| 3671 | * The guest is writing a page that is write-tracked, which cannot |
| 3672 | * be fixed by the page fault handler. |
| 3673 | */ |
| 3674 | if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) |
| 3675 | return true; |
| 3676 | |
| 3677 | return false; |
| 3678 | } |
| 3679 | |
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 3680 | static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) |
| 3681 | { |
| 3682 | struct kvm_shadow_walk_iterator iterator; |
| 3683 | u64 spte; |
| 3684 | |
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 3685 | walk_shadow_page_lockless_begin(vcpu); |
| 3686 | for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { |
| 3687 | clear_sp_write_flooding_count(iterator.sptep); |
| 3688 | if (!is_shadow_present_pte(spte)) |
| 3689 | break; |
| 3690 | } |
| 3691 | walk_shadow_page_lockless_end(vcpu); |
| 3692 | } |
| 3693 | |
Vitaly Kuznetsov | e8c2226 | 2020-06-15 14:13:34 +0200 | [diff] [blame] | 3694 | static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, |
| 3695 | gfn_t gfn) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 3696 | { |
| 3697 | struct kvm_arch_async_pf arch; |
Xiao Guangrong | fb67e14 | 2010-12-07 10:35:25 +0800 | [diff] [blame] | 3698 | |
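/*
 * The token below packs an incrementing per-vCPU async-PF id in the upper
 * bits and the vcpu_id in the low 12 bits, assuming vcpu_id fits in 12 bits.
 */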
Gleb Natapov | 7c90705 | 2010-10-14 11:22:53 +0200 | [diff] [blame] | 3699 | arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 3700 | arch.gfn = gfn; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3701 | arch.direct_map = vcpu->arch.mmu->direct_map; |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 3702 | arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 3703 | |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 3704 | return kvm_setup_async_pf(vcpu, cr2_or_gpa, |
| 3705 | kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 3706 | } |
| 3707 | |
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 3708 | static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 3709 | gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva, |
| 3710 | bool write, bool *writable) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 3711 | { |
Paolo Bonzini | c36b715 | 2020-04-16 09:48:07 -0400 | [diff] [blame] | 3712 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 3713 | bool async; |
| 3714 | |
Sean Christopherson | e0c3786 | 2021-02-25 12:47:30 -0800 | [diff] [blame] | 3715 | /* |
| 3716 | * Retry the page fault if the gfn hit a memslot that is being deleted |
| 3717 | * or moved. This ensures any existing SPTEs for the old memslot will |
| 3718 | * be zapped before KVM inserts a new MMIO SPTE for the gfn. |
| 3719 | */ |
| 3720 | if (slot && (slot->flags & KVM_MEMSLOT_INVALID)) |
| 3721 | return true; |
| 3722 | |
Paolo Bonzini | c36b715 | 2020-04-16 09:48:07 -0400 | [diff] [blame] | 3723 | /* Don't expose private memslots to L2. */ |
| 3724 | if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) { |
Jim Mattson | 3a2936d | 2018-05-09 17:02:05 -0400 | [diff] [blame] | 3725 | *pfn = KVM_PFN_NOSLOT; |
Sean Christopherson | c583eed | 2020-04-15 14:44:13 -0700 | [diff] [blame] | 3726 | *writable = false; |
Jim Mattson | 3a2936d | 2018-05-09 17:02:05 -0400 | [diff] [blame] | 3727 | return false; |
| 3728 | } |
| 3729 | |
Paolo Bonzini | 3520469 | 2015-04-02 11:20:48 +0200 | [diff] [blame] | 3730 | async = false; |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 3731 | *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, |
| 3732 | write, writable, hva); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 3733 | if (!async) |
| 3734 | return false; /* *pfn has correct page already */ |
| 3735 | |
Wanpeng Li | 9bc1f09 | 2017-06-08 20:13:40 -0700 | [diff] [blame] | 3736 | if (!prefault && kvm_can_do_async_pf(vcpu)) { |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 3737 | trace_kvm_try_async_get_page(cr2_or_gpa, gfn); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 3738 | if (kvm_find_async_pf_gfn(vcpu, gfn)) { |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 3739 | trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 3740 | kvm_make_request(KVM_REQ_APF_HALT, vcpu); |
| 3741 | return true; |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 3742 | } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn)) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 3743 | return true; |
| 3744 | } |
| 3745 | |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 3746 | *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, |
| 3747 | write, writable, hva); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 3748 | return false; |
| 3749 | } |
| 3750 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 3751 | static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, |
| 3752 | bool prefault, int max_level, bool is_tdp) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3753 | { |
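/*
 * Rough flow: try the lockless fast path (legacy MMU only), top up the
 * per-vCPU caches, resolve the pfn (possibly via async page faults), then
 * take mmu_lock (shared for the TDP MMU, exclusive otherwise) and install
 * the mapping, retrying if the mmu_notifier sequence changed meanwhile.
 */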
David Matlack | 63c0cac | 2021-06-17 23:19:47 +0000 | [diff] [blame] | 3754 | bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu); |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 3755 | bool write = error_code & PFERR_WRITE_MASK; |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 3756 | bool map_writable; |
Avi Kivity | ebeace8 | 2007-01-05 16:36:47 -0800 | [diff] [blame] | 3757 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 3758 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 3759 | unsigned long mmu_seq; |
| 3760 | kvm_pfn_t pfn; |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 3761 | hva_t hva; |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3762 | int r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3763 | |
| 3764 | if (page_fault_handle_page_track(vcpu, error_code, gfn)) |
| 3765 | return RET_PF_EMULATE; |
| 3766 | |
David Matlack | 0b873fd | 2021-06-17 23:19:46 +0000 | [diff] [blame] | 3767 | if (!is_tdp_mmu_fault) { |
Ben Gardon | bb18842 | 2020-10-14 11:26:50 -0700 | [diff] [blame] | 3768 | r = fast_page_fault(vcpu, gpa, error_code); |
| 3769 | if (r != RET_PF_INVALID) |
| 3770 | return r; |
| 3771 | } |
Sean Christopherson | 8329144 | 2020-07-02 19:35:30 -0700 | [diff] [blame] | 3772 | |
Sean Christopherson | 378f5cd | 2020-07-02 19:35:36 -0700 | [diff] [blame] | 3773 | r = mmu_topup_memory_caches(vcpu, false); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3774 | if (r) |
| 3775 | return r; |
| 3776 | |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 3777 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
| 3778 | smp_rmb(); |
| 3779 | |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 3780 | if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva, |
| 3781 | write, &map_writable)) |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 3782 | return RET_PF_RETRY; |
| 3783 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 3784 | if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r)) |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 3785 | return r; |
| 3786 | |
| 3787 | r = RET_PF_RETRY; |
Ben Gardon | a2855af | 2021-02-02 10:57:29 -0800 | [diff] [blame] | 3788 | |
David Matlack | 0b873fd | 2021-06-17 23:19:46 +0000 | [diff] [blame] | 3789 | if (is_tdp_mmu_fault) |
Ben Gardon | a2855af | 2021-02-02 10:57:29 -0800 | [diff] [blame] | 3790 | read_lock(&vcpu->kvm->mmu_lock); |
| 3791 | else |
| 3792 | write_lock(&vcpu->kvm->mmu_lock); |
| 3793 | |
David Stevens | 4a42d84 | 2021-02-22 11:45:22 +0900 | [diff] [blame] | 3794 | if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva)) |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 3795 | goto out_unlock; |
Sean Christopherson | 7bd7ded | 2020-06-23 12:35:42 -0700 | [diff] [blame] | 3796 | r = make_mmu_pages_available(vcpu); |
| 3797 | if (r) |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 3798 | goto out_unlock; |
Ben Gardon | bb18842 | 2020-10-14 11:26:50 -0700 | [diff] [blame] | 3799 | |
David Matlack | 0b873fd | 2021-06-17 23:19:46 +0000 | [diff] [blame] | 3800 | if (is_tdp_mmu_fault) |
Ben Gardon | bb18842 | 2020-10-14 11:26:50 -0700 | [diff] [blame] | 3801 | r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level, |
| 3802 | pfn, prefault); |
| 3803 | else |
| 3804 | r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn, |
| 3805 | prefault, is_tdp); |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 3806 | |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 3807 | out_unlock: |
David Matlack | 0b873fd | 2021-06-17 23:19:46 +0000 | [diff] [blame] | 3808 | if (is_tdp_mmu_fault) |
Ben Gardon | a2855af | 2021-02-02 10:57:29 -0800 | [diff] [blame] | 3809 | read_unlock(&vcpu->kvm->mmu_lock); |
| 3810 | else |
| 3811 | write_unlock(&vcpu->kvm->mmu_lock); |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 3812 | kvm_release_pfn_clean(pfn); |
| 3813 | return r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3814 | } |
| 3815 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 3816 | static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, |
| 3817 | u32 error_code, bool prefault) |
| 3818 | { |
| 3819 | pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code); |
| 3820 | |
| 3821 | /* This path builds a PAE pagetable, we can map 2mb pages at maximum. */ |
| 3822 | return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault, |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3823 | PG_LEVEL_2M, false); |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 3824 | } |
| 3825 | |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 3826 | int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 3827 | u64 fault_address, char *insn, int insn_len) |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 3828 | { |
| 3829 | int r = 1; |
Vitaly Kuznetsov | 9ce372b | 2020-05-07 16:36:02 +0200 | [diff] [blame] | 3830 | u32 flags = vcpu->arch.apf.host_apf_flags; |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 3831 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3832 | #ifndef CONFIG_X86_64 |
| 3833 | /* A 64-bit CR2 should be impossible on 32-bit KVM. */ |
| 3834 | if (WARN_ON_ONCE(fault_address >> 32)) |
| 3835 | return -EFAULT; |
| 3836 | #endif |
| 3837 | |
Paolo Bonzini | c595cee | 2018-07-02 13:07:14 +0200 | [diff] [blame] | 3838 | vcpu->arch.l1tf_flush_l1d = true; |
Vitaly Kuznetsov | 9ce372b | 2020-05-07 16:36:02 +0200 | [diff] [blame] | 3839 | if (!flags) { |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 3840 | trace_kvm_page_fault(fault_address, error_code); |
| 3841 | |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 3842 | if (kvm_event_needs_reinjection(vcpu)) |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 3843 | kvm_mmu_unprotect_page_virt(vcpu, fault_address); |
| 3844 | r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn, |
| 3845 | insn_len); |
Vitaly Kuznetsov | 9ce372b | 2020-05-07 16:36:02 +0200 | [diff] [blame] | 3846 | } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) { |
Vitaly Kuznetsov | 68fd66f | 2020-05-25 16:41:17 +0200 | [diff] [blame] | 3847 | vcpu->arch.apf.host_apf_flags = 0; |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 3848 | local_irq_disable(); |
Thomas Gleixner | 6bca69a | 2020-03-07 00:42:06 +0100 | [diff] [blame] | 3849 | kvm_async_pf_task_wait_schedule(fault_address); |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 3850 | local_irq_enable(); |
Vitaly Kuznetsov | 9ce372b | 2020-05-07 16:36:02 +0200 | [diff] [blame] | 3851 | } else { |
| 3852 | WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags); |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 3853 | } |
Vitaly Kuznetsov | 9ce372b | 2020-05-07 16:36:02 +0200 | [diff] [blame] | 3854 | |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 3855 | return r; |
| 3856 | } |
| 3857 | EXPORT_SYMBOL_GPL(kvm_handle_page_fault); |
| 3858 | |
Sean Christopherson | 7a02674 | 2020-02-06 14:14:34 -0800 | [diff] [blame] | 3859 | int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, |
| 3860 | bool prefault) |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 3861 | { |
Sean Christopherson | cb9b88c | 2019-12-06 15:57:18 -0800 | [diff] [blame] | 3862 | int max_level; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 3863 | |
Sean Christopherson | e662ec3 | 2020-04-27 17:54:21 -0700 | [diff] [blame] | 3864 | for (max_level = KVM_MAX_HUGEPAGE_LEVEL; |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3865 | max_level > PG_LEVEL_4K; |
Sean Christopherson | cb9b88c | 2019-12-06 15:57:18 -0800 | [diff] [blame] | 3866 | max_level--) { |
| 3867 | int page_num = KVM_PAGES_PER_HPAGE(max_level); |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 3868 | gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 3869 | |
Sean Christopherson | cb9b88c | 2019-12-06 15:57:18 -0800 | [diff] [blame] | 3870 | if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num)) |
| 3871 | break; |
Takuya Yoshikawa | fd13690 | 2015-10-16 17:06:02 +0900 | [diff] [blame] | 3872 | } |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 3873 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 3874 | return direct_page_fault(vcpu, gpa, error_code, prefault, |
| 3875 | max_level, true); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 3876 | } |
| 3877 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 3878 | static void nonpaging_init_context(struct kvm_vcpu *vcpu, |
| 3879 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3880 | { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3881 | context->page_fault = nonpaging_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3882 | context->gva_to_gpa = nonpaging_gva_to_gpa; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 3883 | context->sync_page = nonpaging_sync_page; |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 3884 | context->invlpg = NULL; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 3885 | context->root_level = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3886 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 3887 | context->direct_map = true; |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 3888 | context->nx = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3889 | } |
| 3890 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3891 | static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd, |
Sean Christopherson | 0be4435 | 2020-02-28 14:52:40 -0800 | [diff] [blame] | 3892 | union kvm_mmu_page_role role) |
| 3893 | { |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3894 | return (role.direct || pgd == root->pgd) && |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 3895 | VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) && |
| 3896 | role.word == to_shadow_page(root->hpa)->role.word; |
Sean Christopherson | 0be4435 | 2020-02-28 14:52:40 -0800 | [diff] [blame] | 3897 | } |
| 3898 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3899 | /* |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3900 | * Find out if a previously cached root matching the new pgd/role is available. |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3901 | * The current root is also inserted into the cache. |
| 3902 | * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is |
| 3903 | * returned. |
| 3904 | * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and |
| 3905 | * false is returned. This root should now be freed by the caller. |
| 3906 | */ |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3907 | static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd, |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3908 | union kvm_mmu_page_role new_role) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3909 | { |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3910 | uint i; |
| 3911 | struct kvm_mmu_root_info root; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3912 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3913 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3914 | root.pgd = mmu->root_pgd; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3915 | root.hpa = mmu->root_hpa; |
| 3916 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3917 | if (is_root_usable(&root, new_pgd, new_role)) |
Sean Christopherson | 0be4435 | 2020-02-28 14:52:40 -0800 | [diff] [blame] | 3918 | return true; |
| 3919 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3920 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { |
| 3921 | swap(root, mmu->prev_roots[i]); |
| 3922 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3923 | if (is_root_usable(&root, new_pgd, new_role)) |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3924 | break; |
| 3925 | } |
| 3926 | |
| 3927 | mmu->root_hpa = root.hpa; |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3928 | mmu->root_pgd = root.pgd; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3929 | |
| 3930 | return i < KVM_MMU_NUM_PREV_ROOTS; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3931 | } |
| 3932 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3933 | static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd, |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame] | 3934 | union kvm_mmu_page_role new_role) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3935 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3936 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 3937 | |
| 3938 | /* |
| 3939 | * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid |
| 3940 | * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs |
| 3941 | * later if necessary. |
| 3942 | */ |
| 3943 | if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame] | 3944 | mmu->root_level >= PT64_ROOT_4LEVEL) |
Vitaly Kuznetsov | fe9304d | 2020-07-10 16:11:57 +0200 | [diff] [blame] | 3945 | return cached_root_available(vcpu, new_pgd, new_role); |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 3946 | |
| 3947 | return false; |
| 3948 | } |
| 3949 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3950 | static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, |
Sean Christopherson | b512910 | 2021-06-09 16:42:27 -0700 | [diff] [blame] | 3951 | union kvm_mmu_page_role new_role) |
Junaid Shahid | 0aab33e | 2018-06-27 14:59:09 -0700 | [diff] [blame] | 3952 | { |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3953 | if (!fast_pgd_switch(vcpu, new_pgd, new_role)) { |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame] | 3954 | kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT); |
| 3955 | return; |
| 3956 | } |
| 3957 | |
| 3958 | /* |
| 3959 | * It's possible that the cached previous root page is obsolete because |
| 3960 | * of a change in the MMU generation number. However, changing the |
| 3961 | * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will |
| 3962 | * free the root set here and allocate a new one. |
| 3963 | */ |
| 3964 | kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); |
| 3965 | |
Sean Christopherson | b512910 | 2021-06-09 16:42:27 -0700 | [diff] [blame] | 3966 | if (force_flush_and_sync_on_reuse) { |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame] | 3967 | kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); |
| 3968 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Sean Christopherson | b512910 | 2021-06-09 16:42:27 -0700 | [diff] [blame] | 3969 | } |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame] | 3970 | |
| 3971 | /* |
| 3972 | * The last MMIO access's GVA and GPA are cached in the VCPU. When |
| 3973 | * switching to a new CR3, that GVA->GPA mapping may no longer be |
| 3974 | * valid. So clear any cached MMIO info even when we don't need to sync |
| 3975 | * the shadow page tables. |
| 3976 | */ |
| 3977 | vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); |
| 3978 | |
Ben Gardon | daa5b6c | 2020-10-14 11:26:59 -0700 | [diff] [blame] | 3979 | /* |
| 3980 | * If this is a direct root page, it doesn't have a write flooding |
| 3981 | * count. Otherwise, clear the write flooding count. |
| 3982 | */ |
| 3983 | if (!new_role.direct) |
| 3984 | __clear_sp_write_flooding_count( |
| 3985 | to_shadow_page(vcpu->arch.mmu->root_hpa)); |
Junaid Shahid | 0aab33e | 2018-06-27 14:59:09 -0700 | [diff] [blame] | 3986 | } |
| 3987 | |
Sean Christopherson | b512910 | 2021-06-09 16:42:27 -0700 | [diff] [blame] | 3988 | void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd) |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 3989 | { |
Sean Christopherson | b512910 | 2021-06-09 16:42:27 -0700 | [diff] [blame] | 3990 | __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3991 | } |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3992 | EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3993 | |
Joerg Roedel | 5777ed3 | 2010-09-10 17:30:42 +0200 | [diff] [blame] | 3994 | static unsigned long get_cr3(struct kvm_vcpu *vcpu) |
| 3995 | { |
Avi Kivity | 9f8fe50 | 2010-12-05 17:30:00 +0200 | [diff] [blame] | 3996 | return kvm_read_cr3(vcpu); |
Joerg Roedel | 5777ed3 | 2010-09-10 17:30:42 +0200 | [diff] [blame] | 3997 | } |
| 3998 | |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 3999 | static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 4000 | unsigned int access, int *nr_present) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4001 | { |
| 4002 | if (unlikely(is_mmio_spte(*sptep))) { |
| 4003 | if (gfn != get_mmio_spte_gfn(*sptep)) { |
| 4004 | mmu_spte_clear_no_track(sptep); |
| 4005 | return true; |
| 4006 | } |
| 4007 | |
| 4008 | (*nr_present)++; |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 4009 | mark_mmio_spte(vcpu, sptep, gfn, access); |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4010 | return true; |
| 4011 | } |
| 4012 | |
| 4013 | return false; |
| 4014 | } |
| 4015 | |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4016 | static inline bool is_last_gpte(struct kvm_mmu *mmu, |
| 4017 | unsigned level, unsigned gpte) |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4018 | { |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4019 | /* |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4020 | * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. |
| 4021 | * If it is clear, there are no large pages at this level, so clear |
| 4022 | * PT_PAGE_SIZE_MASK in gpte if that is the case. |
| 4023 | */ |
| 4024 | gpte &= level - mmu->last_nonleaf_level; |
| 4025 | |
Ladi Prosek | 829ee27 | 2017-10-05 11:10:23 +0200 | [diff] [blame] | 4026 | /* |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 4027 | * PG_LEVEL_4K always terminates. The RHS has bit 7 set |
| 4028 | * iff level <= PG_LEVEL_4K, which for our purpose means |
| 4029 | * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then. |
Ladi Prosek | 829ee27 | 2017-10-05 11:10:23 +0200 | [diff] [blame] | 4030 | */ |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 4031 | gpte |= level - PG_LEVEL_4K - 1; |
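/*
 * Worked example with 4-level guest paging (last_nonleaf_level == 4): a
 * PML4E (level 4) hits "4 - 4 == 0" above, clearing bit 7, and "4 - 1 - 1"
 * here leaves it clear, so it never terminates. A PDPTE (level 3)
 * underflows to an all-ones mask in the first step, preserving a set PS
 * bit (1GB page). A PTE (level 1) gets "1 - 1 - 1", an all-ones value,
 * ORed in here and therefore always terminates.
 */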
Ladi Prosek | 829ee27 | 2017-10-05 11:10:23 +0200 | [diff] [blame] | 4032 | |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4033 | return gpte & PT_PAGE_SIZE_MASK; |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4034 | } |
| 4035 | |
Nadav Har'El | 37406aa | 2013-08-05 11:07:12 +0300 | [diff] [blame] | 4036 | #define PTTYPE_EPT 18 /* arbitrary */ |
| 4037 | #define PTTYPE PTTYPE_EPT |
| 4038 | #include "paging_tmpl.h" |
| 4039 | #undef PTTYPE |
| 4040 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4041 | #define PTTYPE 64 |
| 4042 | #include "paging_tmpl.h" |
| 4043 | #undef PTTYPE |
| 4044 | |
| 4045 | #define PTTYPE 32 |
| 4046 | #include "paging_tmpl.h" |
| 4047 | #undef PTTYPE |
| 4048 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4049 | static void |
| 4050 | __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, |
| 4051 | struct rsvd_bits_validate *rsvd_check, |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4052 | u64 pa_bits_rsvd, int level, bool nx, bool gbpages, |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4053 | bool pse, bool amd) |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4054 | { |
Nadav Amit | 5f7dde7 | 2014-05-07 15:32:50 +0300 | [diff] [blame] | 4055 | u64 gbpages_bit_rsvd = 0; |
Paolo Bonzini | a0c0feb | 2014-09-02 13:24:12 +0200 | [diff] [blame] | 4056 | u64 nonleaf_bit8_rsvd = 0; |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4057 | u64 high_bits_rsvd; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4058 | |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4059 | rsvd_check->bad_mt_xwr = 0; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4060 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4061 | if (!gbpages) |
Nadav Amit | 5f7dde7 | 2014-05-07 15:32:50 +0300 | [diff] [blame] | 4062 | gbpages_bit_rsvd = rsvd_bits(7, 7); |
Paolo Bonzini | a0c0feb | 2014-09-02 13:24:12 +0200 | [diff] [blame] | 4063 | |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4064 | if (level == PT32E_ROOT_LEVEL) |
| 4065 | high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62); |
| 4066 | else |
| 4067 | high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); |
| 4068 | |
| 4069 | /* Note, NX doesn't exist in PDPTEs, this is handled below. */ |
| 4070 | if (!nx) |
| 4071 | high_bits_rsvd |= rsvd_bits(63, 63); |
| 4072 | |
Paolo Bonzini | a0c0feb | 2014-09-02 13:24:12 +0200 | [diff] [blame] | 4073 | /* |
| 4074 | * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for |
| 4075 | * leaf entries) on AMD CPUs only. |
| 4076 | */ |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4077 | if (amd) |
Paolo Bonzini | a0c0feb | 2014-09-02 13:24:12 +0200 | [diff] [blame] | 4078 | nonleaf_bit8_rsvd = rsvd_bits(8, 8); |
| 4079 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4080 | switch (level) { |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4081 | case PT32_ROOT_LEVEL: |
| 4082 | /* no rsvd bits for 2 level 4K page table entries */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4083 | rsvd_check->rsvd_bits_mask[0][1] = 0; |
| 4084 | rsvd_check->rsvd_bits_mask[0][0] = 0; |
| 4085 | rsvd_check->rsvd_bits_mask[1][0] = |
| 4086 | rsvd_check->rsvd_bits_mask[0][0]; |
Xiao Guangrong | f815bce | 2010-03-19 17:58:53 +0800 | [diff] [blame] | 4087 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4088 | if (!pse) { |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4089 | rsvd_check->rsvd_bits_mask[1][1] = 0; |
Xiao Guangrong | f815bce | 2010-03-19 17:58:53 +0800 | [diff] [blame] | 4090 | break; |
| 4091 | } |
| 4092 | |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4093 | if (is_cpuid_PSE36()) |
| 4094 | /* 36bits PSE 4MB page */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4095 | rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4096 | else |
| 4097 | /* 32 bits PSE 4MB page */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4098 | rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4099 | break; |
| 4100 | case PT32E_ROOT_LEVEL: |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4101 | rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) | |
| 4102 | high_bits_rsvd | |
| 4103 | rsvd_bits(5, 8) | |
| 4104 | rsvd_bits(1, 2); /* PDPTE */ |
| 4105 | rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; /* PDE */ |
| 4106 | rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* PTE */ |
| 4107 | rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | |
| 4108 | rsvd_bits(13, 20); /* large page */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4109 | rsvd_check->rsvd_bits_mask[1][0] = |
| 4110 | rsvd_check->rsvd_bits_mask[0][0]; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4111 | break; |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4112 | case PT64_ROOT_5LEVEL: |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4113 | rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | |
| 4114 | nonleaf_bit8_rsvd | |
| 4115 | rsvd_bits(7, 7); |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4116 | rsvd_check->rsvd_bits_mask[1][4] = |
| 4117 | rsvd_check->rsvd_bits_mask[0][4]; |
Gustavo A. R. Silva | df561f66 | 2020-08-23 17:36:59 -0500 | [diff] [blame] | 4118 | fallthrough; |
Yu Zhang | 2a7266a | 2017-08-24 20:27:54 +0800 | [diff] [blame] | 4119 | case PT64_ROOT_4LEVEL: |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4120 | rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | |
| 4121 | nonleaf_bit8_rsvd | |
| 4122 | rsvd_bits(7, 7); |
| 4123 | rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | |
| 4124 | gbpages_bit_rsvd; |
| 4125 | rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd; |
| 4126 | rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4127 | rsvd_check->rsvd_bits_mask[1][3] = |
| 4128 | rsvd_check->rsvd_bits_mask[0][3]; |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4129 | rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | |
| 4130 | gbpages_bit_rsvd | |
| 4131 | rsvd_bits(13, 29); |
| 4132 | rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | |
| 4133 | rsvd_bits(13, 20); /* large page */ |
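/*
 * E.g. if pa_bits_rsvd covers bits 63:36 (guest MAXPHYADDR of 36),
 * high_bits_rsvd is bits 51:36, so a 2MB PDE here must have bits 51:36
 * and 20:13 clear.
 */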
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4134 | rsvd_check->rsvd_bits_mask[1][0] = |
| 4135 | rsvd_check->rsvd_bits_mask[0][0]; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4136 | break; |
| 4137 | } |
| 4138 | } |
| 4139 | |
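/*
 * A minimal sketch of how the masks built above are consulted, mirroring
 * __is_rsvd_bits_set() in spte.h (the helper name below is illustrative):
 * bit 7 of the gPTE (the PS bit at non-terminal levels) selects the
 * "large page" row, and the paging level selects the column.
 */
static inline bool gpte_has_rsvd_bits(struct rsvd_bits_validate *rsvd_check,
				      u64 pte, int level)
{
	int large = (pte >> 7) & 1;

	return (rsvd_check->rsvd_bits_mask[large][level - 1] & pte) != 0;
}
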
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4140 | static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, |
| 4141 | struct kvm_mmu *context) |
| 4142 | { |
| 4143 | __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4144 | vcpu->arch.reserved_gpa_bits, |
| 4145 | context->root_level, context->nx, |
Radim Krčmář | d6321d4 | 2017-08-05 00:12:49 +0200 | [diff] [blame] | 4146 | guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), |
Sean Christopherson | 23493d0 | 2020-03-04 17:34:33 -0800 | [diff] [blame] | 4147 | is_pse(vcpu), |
| 4148 | guest_cpuid_is_amd_or_hygon(vcpu)); |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4149 | } |
| 4150 | |
Xiao Guangrong | 81b8eeb | 2015-08-05 12:04:23 +0800 | [diff] [blame] | 4151 | static void |
| 4152 | __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4153 | u64 pa_bits_rsvd, bool execonly) |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4154 | { |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4155 | u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); |
Paolo Bonzini | 951f9fd | 2015-09-23 10:34:26 +0200 | [diff] [blame] | 4156 | u64 bad_mt_xwr; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4157 | |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4158 | rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7); |
| 4159 | rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7); |
| 4160 | rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6); |
| 4161 | rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6); |
| 4162 | rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4163 | |
| 4164 | /* large page */ |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4165 | rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4166 | rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4167 | rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29); |
| 4168 | rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4169 | rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4170 | |
Paolo Bonzini | 951f9fd | 2015-09-23 10:34:26 +0200 | [diff] [blame] | 4171 | bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ |
| 4172 | bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */ |
| 4173 | bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */ |
| 4174 | bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */ |
| 4175 | bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */ |
| 4176 | if (!execonly) { |
| 4177 | /* bits 0..2 must not be 100 unless VMX capabilities allow it */ |
| 4178 | bad_mt_xwr |= REPEAT_BYTE(1ull << 4); |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4179 | } |
Paolo Bonzini | 951f9fd | 2015-09-23 10:34:26 +0200 | [diff] [blame] | 4180 | rsvd_check->bad_mt_xwr = bad_mt_xwr; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4181 | } |
| 4182 | |
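/*
 * A minimal sketch of the bad_mt_xwr lookup, mirroring __is_bad_mt_xwr()
 * in spte.h (the helper name below is illustrative): bits 0..2 of an EPT
 * SPTE hold the XWR permissions and bits 3..5 the memory type, so the low
 * six bits select exactly one bit of the 64-bit mask built above.
 */
static inline bool ept_spte_has_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
					   u64 spte)
{
	return (rsvd_check->bad_mt_xwr & BIT_ULL(spte & 0x3f)) != 0;
}
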
Xiao Guangrong | 81b8eeb | 2015-08-05 12:04:23 +0800 | [diff] [blame] | 4183 | static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, |
| 4184 | struct kvm_mmu *context, bool execonly) |
| 4185 | { |
| 4186 | __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, |
Sean Christopherson | 5b7f575 | 2021-02-03 16:01:13 -0800 | [diff] [blame] | 4187 | vcpu->arch.reserved_gpa_bits, execonly); |
Xiao Guangrong | 81b8eeb | 2015-08-05 12:04:23 +0800 | [diff] [blame] | 4188 | } |
| 4189 | |
Sean Christopherson | 6f8e65a | 2021-02-03 16:01:14 -0800 | [diff] [blame] | 4190 | static inline u64 reserved_hpa_bits(void) |
| 4191 | { |
| 4192 | return rsvd_bits(shadow_phys_bits, 63); |
| 4193 | } |
| 4194 | |
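/*
 * For example, with shadow_phys_bits == 46 (an illustrative value),
 * reserved_hpa_bits() is rsvd_bits(46, 63) == 0xffffc00000000000ULL,
 * i.e. any host-physical address bit at or above bit 46 in an SPTE is
 * treated as reserved.
 */
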
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4195 | /* |
| 4196 |  * The page table on the host is the shadow page table for the page
| 4197 |  * table in the guest (or an AMD nested guest); its MMU features
| 4198 |  * completely follow the features of the guest.
| 4199 | */ |
| 4200 | void |
| 4201 | reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) |
| 4202 | { |
Sean Christopherson | 112022b | 2021-06-22 10:56:47 -0700 | [diff] [blame] | 4203 | /* |
| 4204 | * KVM uses NX when TDP is disabled to handle a variety of scenarios, |
| 4205 | * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and |
| 4206 | * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0. |
| 4207 | * The iTLB multi-hit workaround can be toggled at any time, so assume |
| 4208 | * NX can be used by any non-nested shadow MMU to avoid having to reset |
| 4209 | * MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled. |
| 4210 | */ |
| 4211 | bool uses_nx = context->nx || !tdp_enabled || |
Vitaly Kuznetsov | 36d9594d | 2018-10-08 21:28:10 +0200 | [diff] [blame] | 4212 | context->mmu_role.base.smep_andnot_wp; |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4213 | struct rsvd_bits_validate *shadow_zero_check; |
| 4214 | int i; |
Paolo Bonzini | 5f0b819 | 2016-03-09 14:28:02 +0100 | [diff] [blame] | 4215 | |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4216 | /* |
| 4217 | * Passing "true" to the last argument is okay; it adds a check |
| 4218 | * on bit 8 of the SPTEs which KVM doesn't use anyway. |
| 4219 | */ |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4220 | shadow_zero_check = &context->shadow_zero_check; |
| 4221 | __reset_rsvds_bits_mask(vcpu, shadow_zero_check, |
Sean Christopherson | 6f8e65a | 2021-02-03 16:01:14 -0800 | [diff] [blame] | 4222 | reserved_hpa_bits(), |
Paolo Bonzini | 5f0b819 | 2016-03-09 14:28:02 +0100 | [diff] [blame] | 4223 | context->shadow_root_level, uses_nx, |
Radim Krčmář | d6321d4 | 2017-08-05 00:12:49 +0200 | [diff] [blame] | 4224 | guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), |
| 4225 | is_pse(vcpu), true); |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4226 | |
| 4227 | if (!shadow_me_mask) |
| 4228 | return; |
| 4229 | |
| 4230 | for (i = context->shadow_root_level; --i >= 0;) { |
| 4231 | shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; |
| 4232 | shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; |
| 4233 | } |
| 4234 | |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4235 | } |
| 4236 | EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); |
| 4237 | |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4238 | static inline bool boot_cpu_is_amd(void) |
| 4239 | { |
| 4240 | WARN_ON_ONCE(!tdp_enabled); |
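	/*
	 * shadow_x_mask mirrors the EPT executable bit on VMX hosts; NPT has
	 * no such bit and reuses NX semantics, so with TDP enabled a zero
	 * mask implies an AMD (or Hygon) host.
	 */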
| 4241 | return shadow_x_mask == 0; |
| 4242 | } |
| 4243 | |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4244 | /* |
| 4245 |  * The direct page table on the host uses as many MMU features as
| 4246 |  * possible; however, KVM currently does not do execution-protection.
| 4247 | */ |
| 4248 | static void |
| 4249 | reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, |
| 4250 | struct kvm_mmu *context) |
| 4251 | { |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4252 | struct rsvd_bits_validate *shadow_zero_check; |
| 4253 | int i; |
| 4254 | |
| 4255 | shadow_zero_check = &context->shadow_zero_check; |
| 4256 | |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4257 | if (boot_cpu_is_amd()) |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4258 | __reset_rsvds_bits_mask(vcpu, shadow_zero_check, |
Sean Christopherson | 6f8e65a | 2021-02-03 16:01:14 -0800 | [diff] [blame] | 4259 | reserved_hpa_bits(), |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4260 | context->shadow_root_level, false, |
Borislav Petkov | b8291adc | 2016-03-29 17:41:58 +0200 | [diff] [blame] | 4261 | boot_cpu_has(X86_FEATURE_GBPAGES), |
| 4262 | true, true); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4263 | else |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4264 | __reset_rsvds_bits_mask_ept(shadow_zero_check, |
Sean Christopherson | 6f8e65a | 2021-02-03 16:01:14 -0800 | [diff] [blame] | 4265 | reserved_hpa_bits(), false); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4266 | |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4267 | if (!shadow_me_mask) |
| 4268 | return; |
| 4269 | |
| 4270 | for (i = context->shadow_root_level; --i >= 0;) { |
| 4271 | shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; |
| 4272 | shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; |
| 4273 | } |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4274 | } |
| 4275 | |
| 4276 | /* |
| 4277 |  * Same as the comment on reset_shadow_zero_bits_mask(), except this
| 4278 |  * is the shadow page table for an Intel nested guest.
| 4279 | */ |
| 4280 | static void |
| 4281 | reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, |
| 4282 | struct kvm_mmu *context, bool execonly) |
| 4283 | { |
| 4284 | __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, |
Sean Christopherson | 6f8e65a | 2021-02-03 16:01:14 -0800 | [diff] [blame] | 4285 | reserved_hpa_bits(), execonly); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4286 | } |
| 4287 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4288 | #define BYTE_MASK(access) \ |
| 4289 | ((1 & (access) ? 2 : 0) | \ |
| 4290 | (2 & (access) ? 4 : 0) | \ |
| 4291 | (3 & (access) ? 8 : 0) | \ |
| 4292 | (4 & (access) ? 16 : 0) | \ |
| 4293 | (5 & (access) ? 32 : 0) | \ |
| 4294 | (6 & (access) ? 64 : 0) | \ |
| 4295 | (7 & (access) ? 128 : 0)) |
| 4296 | |
| 4297 | |
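/*
 * Worked expansion, assuming ACC_EXEC_MASK == 1, ACC_WRITE_MASK == 2 and
 * ACC_USER_MASK == 4: bit i of BYTE_MASK(access) is set for every 3-bit
 * UWX combination i that includes the given permission, so
 *   x = BYTE_MASK(ACC_EXEC_MASK)  == 0xaa   (combinations 1,3,5,7)
 *   w = BYTE_MASK(ACC_WRITE_MASK) == 0xcc   (combinations 2,3,6,7)
 *   u = BYTE_MASK(ACC_USER_MASK)  == 0xf0   (combinations 4,5,6,7)
 * and, e.g., ~w has a 1 for every pte_access value that lacks write access.
 */
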
Xiao Guangrong | edc90b7 | 2015-05-11 22:55:21 +0800 | [diff] [blame] | 4298 | static void update_permission_bitmask(struct kvm_vcpu *vcpu, |
| 4299 | struct kvm_mmu *mmu, bool ept) |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4300 | { |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4301 | unsigned byte; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4302 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4303 | const u8 x = BYTE_MASK(ACC_EXEC_MASK); |
| 4304 | const u8 w = BYTE_MASK(ACC_WRITE_MASK); |
| 4305 | const u8 u = BYTE_MASK(ACC_USER_MASK); |
| 4306 | |
| 4307 | bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0; |
| 4308 | bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0; |
| 4309 | bool cr0_wp = is_write_protection(vcpu); |
| 4310 | |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4311 | for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4312 | unsigned pfec = byte << 1; |
| 4313 | |
Feng Wu | 97ec8c0 | 2014-04-01 17:46:34 +0800 | [diff] [blame] | 4314 | /* |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4315 | * Each "*f" variable has a 1 bit for each UWX value |
| 4316 | * that causes a fault with the given PFEC. |
Feng Wu | 97ec8c0 | 2014-04-01 17:46:34 +0800 | [diff] [blame] | 4317 | */ |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4318 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4319 | /* Faults from writes to non-writable pages */ |
Arnd Bergmann | a6a6d3b | 2019-07-12 11:12:30 +0200 | [diff] [blame] | 4320 | u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0; |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4321 | /* Faults from user mode accesses to supervisor pages */ |
Arnd Bergmann | a6a6d3b | 2019-07-12 11:12:30 +0200 | [diff] [blame] | 4322 | u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0; |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4323 | 		/* Faults from fetches of non-executable pages */
Arnd Bergmann | a6a6d3b | 2019-07-12 11:12:30 +0200 | [diff] [blame] | 4324 | u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0; |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4325 | /* Faults from kernel mode fetches of user pages */ |
| 4326 | u8 smepf = 0; |
| 4327 | /* Faults from kernel mode accesses of user pages */ |
| 4328 | u8 smapf = 0; |
Feng Wu | 97ec8c0 | 2014-04-01 17:46:34 +0800 | [diff] [blame] | 4329 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4330 | if (!ept) { |
| 4331 | /* Faults from kernel mode accesses to user pages */ |
| 4332 | u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4333 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4334 | /* Not really needed: !nx will cause pte.nx to fault */ |
| 4335 | if (!mmu->nx) |
| 4336 | ff = 0; |
| 4337 | |
| 4338 | /* Allow supervisor writes if !cr0.wp */ |
| 4339 | if (!cr0_wp) |
| 4340 | wf = (pfec & PFERR_USER_MASK) ? wf : 0; |
| 4341 | |
| 4342 | /* Disallow supervisor fetches of user code if cr4.smep */ |
| 4343 | if (cr4_smep) |
| 4344 | smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0; |
| 4345 | |
| 4346 | /* |
| 4347 | * SMAP:kernel-mode data accesses from user-mode |
| 4348 | * mappings should fault. A fault is considered |
| 4349 | * as a SMAP violation if all of the following |
Peng Hao | 39337ad | 2018-10-04 11:45:00 -0400 | [diff] [blame] | 4350 | * conditions are true: |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4351 | * - X86_CR4_SMAP is set in CR4 |
| 4352 | * - A user page is accessed |
| 4353 | * - The access is not a fetch |
| 4354 | * - Page fault in kernel mode |
| 4355 | * - if CPL = 3 or X86_EFLAGS_AC is clear |
| 4356 | * |
| 4357 | * Here, we cover the first three conditions. |
| 4358 | * The fourth is computed dynamically in permission_fault(); |
| 4359 | * PFERR_RSVD_MASK bit will be set in PFEC if the access is |
| 4360 | * *not* subject to SMAP restrictions. |
| 4361 | */ |
| 4362 | if (cr4_smap) |
| 4363 | smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4364 | } |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4365 | |
| 4366 | mmu->permissions[byte] = ff | uf | wf | smepf | smapf; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4367 | } |
| 4368 | } |
| 4369 | |
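/*
 * A simplified sketch of the fault-time lookup (see permission_fault() in
 * mmu.h for the real code, which also folds the implicit SMAP state into
 * the index; the helper name below is illustrative): the error code picks
 * a byte of mmu->permissions[], the page's UWX access bits pick a bit.
 */
static inline bool pfec_would_fault(struct kvm_mmu *mmu, unsigned pfec,
				    unsigned pte_access)
{
	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
}
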
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4370 | /* |
| 4371 | * PKU is an additional mechanism by which the paging controls access to |
| 4372 | * user-mode addresses based on the value in the PKRU register. Protection |
| 4373 | * key violations are reported through a bit in the page fault error code. |
| 4374 | * Unlike other bits of the error code, the PK bit is not known at the |
| 4375 | * call site of e.g. gva_to_gpa; it must be computed directly in |
| 4376 | * permission_fault based on two bits of PKRU, on some machine state (CR4, |
| 4377 | * CR0, EFER, CPL), and on other bits of the error code and the page tables. |
| 4378 | * |
| 4379 | * In particular the following conditions come from the error code, the |
| 4380 | * page tables and the machine state: |
| 4381 | * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1 |
| 4382 | * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch) |
| 4383 | * - PK is always zero if U=0 in the page tables |
| 4384 | * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access. |
| 4385 | * |
| 4386 | * The PKRU bitmask caches the result of these four conditions. The error |
| 4387 | * code (minus the P bit) and the page table's U bit form an index into the |
| 4388 | * PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed |
| 4389 | * with the two bits of the PKRU register corresponding to the protection key. |
| 4390 | * For the first three conditions above the bits will be 00, thus masking |
| 4391 | * away both AD and WD. For all reads or if the last condition holds, WD |
| 4392 | * only will be masked away. |
| 4393 | */ |
| 4394 | static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
| 4395 | bool ept) |
| 4396 | { |
| 4397 | unsigned bit; |
| 4398 | bool wp; |
| 4399 | |
| 4400 | if (ept) { |
| 4401 | mmu->pkru_mask = 0; |
| 4402 | return; |
| 4403 | } |
| 4404 | |
| 4405 | /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */ |
| 4406 | if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) { |
| 4407 | mmu->pkru_mask = 0; |
| 4408 | return; |
| 4409 | } |
| 4410 | |
| 4411 | wp = is_write_protection(vcpu); |
| 4412 | |
| 4413 | for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { |
| 4414 | unsigned pfec, pkey_bits; |
| 4415 | bool check_pkey, check_write, ff, uf, wf, pte_user; |
| 4416 | |
| 4417 | pfec = bit << 1; |
| 4418 | ff = pfec & PFERR_FETCH_MASK; |
| 4419 | uf = pfec & PFERR_USER_MASK; |
| 4420 | wf = pfec & PFERR_WRITE_MASK; |
| 4421 | |
| 4422 | /* PFEC.RSVD is replaced by ACC_USER_MASK. */ |
| 4423 | pte_user = pfec & PFERR_RSVD_MASK; |
| 4424 | |
| 4425 | /* |
| 4426 | 		 * The protection-key check is needed only for accesses
| 4427 | 		 * that are not instruction fetches and target a user page.
| 4428 | */ |
| 4429 | check_pkey = (!ff && pte_user); |
| 4430 | /* |
| 4431 | * write access is controlled by PKRU if it is a |
| 4432 | * user access or CR0.WP = 1. |
| 4433 | */ |
| 4434 | check_write = check_pkey && wf && (uf || wp); |
| 4435 | |
| 4436 | /* PKRU.AD stops both read and write access. */ |
| 4437 | pkey_bits = !!check_pkey; |
| 4438 | /* PKRU.WD stops write access. */ |
| 4439 | pkey_bits |= (!!check_write) << 1; |
| 4440 | |
| 4441 | mmu->pkru_mask |= (pkey_bits & 3) << pfec; |
| 4442 | } |
| 4443 | } |
| 4444 | |
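/*
 * A simplified sketch of the protection-key check at fault time (see
 * permission_fault() in mmu.h for the real code; the helper name below is
 * illustrative): the two PKRU bits for the page's key are ANDed with the
 * two cached bits selected by the error code and the gPTE's U bit.
 */
static inline bool pkey_would_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    unsigned pfec, bool pte_user, unsigned pkey)
{
	u32 pkru_bits = (vcpu->arch.pkru >> (pkey * 2)) & 3;	/* AD, WD */
	u32 offset = (pfec & ~1) | (pte_user ? PFERR_RSVD_MASK : 0);

	return (pkru_bits & (mmu->pkru_mask >> offset)) != 0;
}
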
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4445 | static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4446 | { |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4447 | unsigned root_level = mmu->root_level; |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4448 | |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4449 | mmu->last_nonleaf_level = root_level; |
| 4450 | if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu)) |
| 4451 | mmu->last_nonleaf_level++; |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4452 | } |
| 4453 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4454 | static void paging64_init_context_common(struct kvm_vcpu *vcpu, |
| 4455 | struct kvm_mmu *context, |
| 4456 | int level) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4457 | { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4458 | context->nx = is_nx(vcpu); |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4459 | context->root_level = level; |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4460 | |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4461 | reset_rsvds_bits_mask(vcpu, context); |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4462 | update_permission_bitmask(vcpu, context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4463 | update_pkru_bitmask(vcpu, context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4464 | update_last_nonleaf_level(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4465 | |
Paolo Bonzini | fa4a2c0 | 2013-10-02 16:56:16 +0200 | [diff] [blame] | 4466 | MMU_WARN_ON(!is_pae(vcpu)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4467 | context->page_fault = paging64_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4468 | context->gva_to_gpa = paging64_gva_to_gpa; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 4469 | context->sync_page = paging64_sync_page; |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 4470 | context->invlpg = paging64_invlpg; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 4471 | context->shadow_root_level = level; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 4472 | context->direct_map = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4473 | } |
| 4474 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4475 | static void paging64_init_context(struct kvm_vcpu *vcpu, |
| 4476 | struct kvm_mmu *context) |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 4477 | { |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4478 | int root_level = is_la57_mode(vcpu) ? |
| 4479 | PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; |
| 4480 | |
| 4481 | paging64_init_context_common(vcpu, context, root_level); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 4482 | } |
| 4483 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4484 | static void paging32_init_context(struct kvm_vcpu *vcpu, |
| 4485 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4486 | { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4487 | context->nx = false; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4488 | context->root_level = PT32_ROOT_LEVEL; |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4489 | |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4490 | reset_rsvds_bits_mask(vcpu, context); |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4491 | update_permission_bitmask(vcpu, context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4492 | update_pkru_bitmask(vcpu, context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4493 | update_last_nonleaf_level(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4494 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4495 | context->page_fault = paging32_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4496 | context->gva_to_gpa = paging32_gva_to_gpa; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 4497 | context->sync_page = paging32_sync_page; |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 4498 | context->invlpg = paging32_invlpg; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4499 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 4500 | context->direct_map = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4501 | } |
| 4502 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4503 | static void paging32E_init_context(struct kvm_vcpu *vcpu, |
| 4504 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4505 | { |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4506 | paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4507 | } |
| 4508 | |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4509 | static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4510 | { |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4511 | union kvm_mmu_extended_role ext = {0}; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4512 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4513 | ext.cr0_pg = !!is_paging(vcpu); |
Vitaly Kuznetsov | 0699c64 | 2019-04-30 19:33:26 +0200 | [diff] [blame] | 4514 | ext.cr4_pae = !!is_pae(vcpu); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4515 | ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); |
| 4516 | ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); |
| 4517 | ext.cr4_pse = !!is_pse(vcpu); |
| 4518 | ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); |
Sean Christopherson | f71a53d | 2021-06-22 10:56:50 -0700 | [diff] [blame] | 4519 | ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4520 | |
| 4521 | ext.valid = 1; |
| 4522 | |
| 4523 | return ext; |
| 4524 | } |
| 4525 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4526 | static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, |
| 4527 | bool base_only) |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4528 | { |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4529 | union kvm_mmu_role role = {0}; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4530 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4531 | role.base.access = ACC_ALL; |
| 4532 | role.base.nxe = !!is_nx(vcpu); |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4533 | role.base.cr0_wp = is_write_protection(vcpu); |
| 4534 | role.base.smm = is_smm(vcpu); |
| 4535 | role.base.guest_mode = is_guest_mode(vcpu); |
| 4536 | |
| 4537 | if (base_only) |
| 4538 | return role; |
| 4539 | |
| 4540 | role.ext = kvm_calc_mmu_role_ext(vcpu); |
| 4541 | |
| 4542 | return role; |
| 4543 | } |
| 4544 | |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 4545 | static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu) |
| 4546 | { |
| 4547 | /* Use 5-level TDP if and only if it's useful/necessary. */ |
Sean Christopherson | 8301305 | 2020-07-15 20:41:22 -0700 | [diff] [blame] | 4548 | if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48) |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 4549 | return 4; |
| 4550 | |
Sean Christopherson | 8301305 | 2020-07-15 20:41:22 -0700 | [diff] [blame] | 4551 | return max_tdp_level; |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 4552 | } |
| 4553 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4554 | static union kvm_mmu_role |
| 4555 | kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) |
| 4556 | { |
| 4557 | union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); |
| 4558 | |
| 4559 | role.base.ad_disabled = (shadow_accessed_mask == 0); |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 4560 | role.base.level = kvm_mmu_get_tdp_level(vcpu); |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4561 | role.base.direct = true; |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 4562 | role.base.gpte_is_8_bytes = true; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4563 | |
| 4564 | return role; |
| 4565 | } |
| 4566 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4567 | static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4568 | { |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 4569 | struct kvm_mmu *context = &vcpu->arch.root_mmu; |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4570 | union kvm_mmu_role new_role = |
| 4571 | kvm_calc_tdp_mmu_root_page_role(vcpu, false); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4572 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4573 | if (new_role.as_u64 == context->mmu_role.as_u64) |
| 4574 | return; |
| 4575 | |
| 4576 | context->mmu_role.as_u64 = new_role.as_u64; |
Sean Christopherson | 7a02674 | 2020-02-06 14:14:34 -0800 | [diff] [blame] | 4577 | context->page_fault = kvm_tdp_page_fault; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 4578 | context->sync_page = nonpaging_sync_page; |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 4579 | context->invlpg = NULL; |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 4580 | context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu); |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 4581 | context->direct_map = true; |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 4582 | context->get_guest_pgd = get_cr3; |
Avi Kivity | e4e517b | 2011-07-28 11:36:17 +0300 | [diff] [blame] | 4583 | context->get_pdptr = kvm_pdptr_read; |
Joerg Roedel | cb659db | 2010-09-10 17:30:43 +0200 | [diff] [blame] | 4584 | context->inject_page_fault = kvm_inject_page_fault; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4585 | |
| 4586 | if (!is_paging(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4587 | context->nx = false; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4588 | context->gva_to_gpa = nonpaging_gva_to_gpa; |
| 4589 | context->root_level = 0; |
| 4590 | } else if (is_long_mode(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4591 | context->nx = is_nx(vcpu); |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4592 | context->root_level = is_la57_mode(vcpu) ? |
| 4593 | PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4594 | reset_rsvds_bits_mask(vcpu, context); |
| 4595 | context->gva_to_gpa = paging64_gva_to_gpa; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4596 | } else if (is_pae(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4597 | context->nx = is_nx(vcpu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4598 | context->root_level = PT32E_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4599 | reset_rsvds_bits_mask(vcpu, context); |
| 4600 | context->gva_to_gpa = paging64_gva_to_gpa; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4601 | } else { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4602 | context->nx = false; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4603 | context->root_level = PT32_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4604 | reset_rsvds_bits_mask(vcpu, context); |
| 4605 | context->gva_to_gpa = paging32_gva_to_gpa; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4606 | } |
| 4607 | |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4608 | update_permission_bitmask(vcpu, context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4609 | update_pkru_bitmask(vcpu, context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4610 | update_last_nonleaf_level(vcpu, context); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4611 | reset_tdp_shadow_zero_bits_mask(vcpu, context); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4612 | } |
| 4613 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4614 | static union kvm_mmu_role |
Sean Christopherson | 59505b5 | 2020-07-15 20:41:15 -0700 | [diff] [blame] | 4615 | kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4616 | { |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4617 | union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 4618 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4619 | role.base.smep_andnot_wp = role.ext.cr4_smep && |
| 4620 | !is_write_protection(vcpu); |
| 4621 | role.base.smap_andnot_wp = role.ext.cr4_smap && |
| 4622 | !is_write_protection(vcpu); |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 4623 | role.base.gpte_is_8_bytes = !!is_pae(vcpu); |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4624 | |
Sean Christopherson | 59505b5 | 2020-07-15 20:41:15 -0700 | [diff] [blame] | 4625 | return role; |
| 4626 | } |
| 4627 | |
| 4628 | static union kvm_mmu_role |
| 4629 | kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) |
| 4630 | { |
| 4631 | union kvm_mmu_role role = |
| 4632 | kvm_calc_shadow_root_page_role_common(vcpu, base_only); |
| 4633 | |
| 4634 | role.base.direct = !is_paging(vcpu); |
| 4635 | |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4636 | if (!is_long_mode(vcpu)) |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4637 | role.base.level = PT32E_ROOT_LEVEL; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4638 | else if (is_la57_mode(vcpu)) |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4639 | role.base.level = PT64_ROOT_5LEVEL; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4640 | else |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4641 | role.base.level = PT64_ROOT_4LEVEL; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4642 | |
| 4643 | return role; |
| 4644 | } |
| 4645 | |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 4646 | static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context, |
| 4647 | u32 cr0, u32 cr4, u32 efer, |
| 4648 | union kvm_mmu_role new_role) |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4649 | { |
Paolo Bonzini | 929d1cf | 2020-05-19 06:18:31 -0400 | [diff] [blame] | 4650 | if (!(cr0 & X86_CR0_PG)) |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4651 | nonpaging_init_context(vcpu, context); |
Paolo Bonzini | 929d1cf | 2020-05-19 06:18:31 -0400 | [diff] [blame] | 4652 | else if (efer & EFER_LMA) |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4653 | paging64_init_context(vcpu, context); |
Paolo Bonzini | 929d1cf | 2020-05-19 06:18:31 -0400 | [diff] [blame] | 4654 | else if (cr4 & X86_CR4_PAE) |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4655 | paging32E_init_context(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4656 | else |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4657 | paging32_init_context(vcpu, context); |
Avi Kivity | a770f6f | 2008-12-21 19:20:09 +0200 | [diff] [blame] | 4658 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4659 | context->mmu_role.as_u64 = new_role.as_u64; |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4660 | reset_shadow_zero_bits_mask(vcpu, context); |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 4661 | } |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 4662 | |
| 4663 | static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer) |
| 4664 | { |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 4665 | struct kvm_mmu *context = &vcpu->arch.root_mmu; |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 4666 | union kvm_mmu_role new_role = |
| 4667 | kvm_calc_shadow_mmu_root_page_role(vcpu, false); |
| 4668 | |
| 4669 | if (new_role.as_u64 != context->mmu_role.as_u64) |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 4670 | shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role); |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 4671 | } |
| 4672 | |
Sean Christopherson | 59505b5 | 2020-07-15 20:41:15 -0700 | [diff] [blame] | 4673 | static union kvm_mmu_role |
| 4674 | kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu) |
| 4675 | { |
| 4676 | union kvm_mmu_role role = |
| 4677 | kvm_calc_shadow_root_page_role_common(vcpu, false); |
| 4678 | |
| 4679 | role.base.direct = false; |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 4680 | role.base.level = kvm_mmu_get_tdp_level(vcpu); |
Sean Christopherson | 59505b5 | 2020-07-15 20:41:15 -0700 | [diff] [blame] | 4681 | |
| 4682 | return role; |
| 4683 | } |
| 4684 | |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 4685 | void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer, |
| 4686 | gpa_t nested_cr3) |
| 4687 | { |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 4688 | struct kvm_mmu *context = &vcpu->arch.guest_mmu; |
Sean Christopherson | 59505b5 | 2020-07-15 20:41:15 -0700 | [diff] [blame] | 4689 | union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu); |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 4690 | |
Sean Christopherson | b512910 | 2021-06-09 16:42:27 -0700 | [diff] [blame] | 4691 | __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base); |
Vitaly Kuznetsov | a506fdd | 2020-07-10 16:11:55 +0200 | [diff] [blame] | 4692 | |
Sean Christopherson | a3322d5 | 2021-03-04 17:10:45 -0800 | [diff] [blame] | 4693 | if (new_role.as_u64 != context->mmu_role.as_u64) { |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 4694 | shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role); |
Sean Christopherson | a3322d5 | 2021-03-04 17:10:45 -0800 | [diff] [blame] | 4695 | |
| 4696 | /* |
| 4697 | * Override the level set by the common init helper, nested TDP |
| 4698 | * always uses the host's TDP configuration. |
| 4699 | */ |
| 4700 | context->shadow_root_level = new_role.base.level; |
| 4701 | } |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 4702 | } |
| 4703 | EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu); |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 4704 | |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4705 | static union kvm_mmu_role |
| 4706 | kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 4707 | bool execonly, u8 level) |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4708 | { |
Sean Christopherson | 552c69b1 | 2019-03-07 15:27:43 -0800 | [diff] [blame] | 4709 | union kvm_mmu_role role = {0}; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4710 | |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 4711 | /* SMM flag is inherited from root_mmu */ |
| 4712 | role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4713 | |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 4714 | role.base.level = level; |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 4715 | role.base.gpte_is_8_bytes = true; |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4716 | role.base.direct = false; |
| 4717 | role.base.ad_disabled = !accessed_dirty; |
| 4718 | role.base.guest_mode = true; |
| 4719 | role.base.access = ACC_ALL; |
| 4720 | |
Sean Christopherson | 552c69b1 | 2019-03-07 15:27:43 -0800 | [diff] [blame] | 4721 | role.ext = kvm_calc_mmu_role_ext(vcpu); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4722 | role.ext.execonly = execonly; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4723 | |
| 4724 | return role; |
| 4725 | } |
| 4726 | |
Paolo Bonzini | ae1e2d1 | 2017-03-30 11:55:30 +0200 | [diff] [blame] | 4727 | void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 4728 | bool accessed_dirty, gpa_t new_eptp) |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 4729 | { |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 4730 | struct kvm_mmu *context = &vcpu->arch.guest_mmu; |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 4731 | u8 level = vmx_eptp_page_walk_level(new_eptp); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4732 | union kvm_mmu_role new_role = |
| 4733 | kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty, |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 4734 | execonly, level); |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 4735 | |
Sean Christopherson | b512910 | 2021-06-09 16:42:27 -0700 | [diff] [blame] | 4736 | __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4737 | |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4738 | if (new_role.as_u64 == context->mmu_role.as_u64) |
| 4739 | return; |
| 4740 | |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 4741 | context->shadow_root_level = level; |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 4742 | |
| 4743 | context->nx = true; |
Paolo Bonzini | ae1e2d1 | 2017-03-30 11:55:30 +0200 | [diff] [blame] | 4744 | context->ept_ad = accessed_dirty; |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 4745 | context->page_fault = ept_page_fault; |
| 4746 | context->gva_to_gpa = ept_gva_to_gpa; |
| 4747 | context->sync_page = ept_sync_page; |
| 4748 | context->invlpg = ept_invlpg; |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 4749 | context->root_level = level; |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 4750 | context->direct_map = false; |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4751 | context->mmu_role.as_u64 = new_role.as_u64; |
Vitaly Kuznetsov | 3dc773e | 2018-10-08 21:28:06 +0200 | [diff] [blame] | 4752 | |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 4753 | update_permission_bitmask(vcpu, context, true); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4754 | update_pkru_bitmask(vcpu, context, true); |
Ladi Prosek | fd19d3b4 | 2017-10-05 11:10:22 +0200 | [diff] [blame] | 4755 | update_last_nonleaf_level(vcpu, context); |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 4756 | reset_rsvds_bits_mask_ept(vcpu, context, execonly); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4757 | reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 4758 | } |
| 4759 | EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); |
| 4760 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4761 | static void init_kvm_softmmu(struct kvm_vcpu *vcpu) |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 4762 | { |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 4763 | struct kvm_mmu *context = &vcpu->arch.root_mmu; |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 4764 | |
Paolo Bonzini | 929d1cf | 2020-05-19 06:18:31 -0400 | [diff] [blame] | 4765 | kvm_init_shadow_mmu(vcpu, |
| 4766 | kvm_read_cr0_bits(vcpu, X86_CR0_PG), |
| 4767 | kvm_read_cr4_bits(vcpu, X86_CR4_PAE), |
| 4768 | vcpu->arch.efer); |
| 4769 | |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 4770 | context->get_guest_pgd = get_cr3; |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 4771 | context->get_pdptr = kvm_pdptr_read; |
| 4772 | context->inject_page_fault = kvm_inject_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4773 | } |
| 4774 | |
Sean Christopherson | 654430ef | 2021-06-10 15:00:26 -0700 | [diff] [blame] | 4775 | static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu) |
| 4776 | { |
| 4777 | union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false); |
| 4778 | |
| 4779 | /* |
| 4780 | 	 * Nested MMUs are used only for walking L2's gva->gpa; they never have
| 4781 | * shadow pages of their own and so "direct" has no meaning. Set it |
| 4782 | * to "true" to try to detect bogus usage of the nested MMU. |
| 4783 | */ |
| 4784 | role.base.direct = true; |
| 4785 | |
| 4786 | if (!is_paging(vcpu)) |
| 4787 | role.base.level = 0; |
| 4788 | else if (is_long_mode(vcpu)) |
| 4789 | role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL : |
| 4790 | PT64_ROOT_4LEVEL; |
| 4791 | else if (is_pae(vcpu)) |
| 4792 | role.base.level = PT32E_ROOT_LEVEL; |
| 4793 | else |
| 4794 | role.base.level = PT32_ROOT_LEVEL; |
| 4795 | |
| 4796 | return role; |
| 4797 | } |
| 4798 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4799 | static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4800 | { |
Sean Christopherson | 654430ef | 2021-06-10 15:00:26 -0700 | [diff] [blame] | 4801 | union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4802 | struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; |
| 4803 | |
Vitaly Kuznetsov | bf627a9 | 2018-10-08 21:28:13 +0200 | [diff] [blame] | 4804 | if (new_role.as_u64 == g_context->mmu_role.as_u64) |
| 4805 | return; |
| 4806 | |
| 4807 | g_context->mmu_role.as_u64 = new_role.as_u64; |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 4808 | g_context->get_guest_pgd = get_cr3; |
Avi Kivity | e4e517b | 2011-07-28 11:36:17 +0300 | [diff] [blame] | 4809 | g_context->get_pdptr = kvm_pdptr_read; |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4810 | g_context->inject_page_fault = kvm_inject_page_fault; |
| 4811 | |
| 4812 | /* |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 4813 | * L2 page tables are never shadowed, so there is no need to sync |
| 4814 | * SPTEs. |
| 4815 | */ |
| 4816 | g_context->invlpg = NULL; |
| 4817 | |
| 4818 | /* |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 4819 | * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using |
David Matlack | 0af2593 | 2015-12-30 08:26:17 -0800 | [diff] [blame] | 4820 | * L1's nested page tables (e.g. EPT12). The nested translation |
| 4821 | * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using |
| 4822 | * L2's page tables as the first level of translation and L1's |
| 4823 | * nested page tables as the second level of translation. Basically |
| 4824 | * the gva_to_gpa functions between mmu and nested_mmu are swapped. |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4825 | */ |
| 4826 | if (!is_paging(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4827 | g_context->nx = false; |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4828 | g_context->root_level = 0; |
| 4829 | g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; |
| 4830 | } else if (is_long_mode(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4831 | g_context->nx = is_nx(vcpu); |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4832 | g_context->root_level = is_la57_mode(vcpu) ? |
| 4833 | PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4834 | reset_rsvds_bits_mask(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4835 | g_context->gva_to_gpa = paging64_gva_to_gpa_nested; |
| 4836 | } else if (is_pae(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4837 | g_context->nx = is_nx(vcpu); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4838 | g_context->root_level = PT32E_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4839 | reset_rsvds_bits_mask(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4840 | g_context->gva_to_gpa = paging64_gva_to_gpa_nested; |
| 4841 | } else { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4842 | g_context->nx = false; |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4843 | g_context->root_level = PT32_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4844 | reset_rsvds_bits_mask(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4845 | g_context->gva_to_gpa = paging32_gva_to_gpa_nested; |
| 4846 | } |
| 4847 | |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4848 | update_permission_bitmask(vcpu, g_context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4849 | update_pkru_bitmask(vcpu, g_context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4850 | update_last_nonleaf_level(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4851 | } |
| 4852 | |
Sean Christopherson | c906066 | 2021-06-09 16:42:33 -0700 | [diff] [blame] | 4853 | void kvm_init_mmu(struct kvm_vcpu *vcpu) |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4854 | { |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4855 | if (mmu_is_nested(vcpu)) |
Paolo Bonzini | e0c6db3 | 2014-12-23 13:39:46 +0100 | [diff] [blame] | 4856 | init_kvm_nested_mmu(vcpu); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 4857 | else if (tdp_enabled) |
Paolo Bonzini | e0c6db3 | 2014-12-23 13:39:46 +0100 | [diff] [blame] | 4858 | init_kvm_tdp_mmu(vcpu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4859 | else |
Paolo Bonzini | e0c6db3 | 2014-12-23 13:39:46 +0100 | [diff] [blame] | 4860 | init_kvm_softmmu(vcpu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4861 | } |
Junaid Shahid | 1c53da3 | 2018-06-27 14:59:10 -0700 | [diff] [blame] | 4862 | EXPORT_SYMBOL_GPL(kvm_init_mmu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4863 | |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4864 | static union kvm_mmu_page_role |
| 4865 | kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu) |
| 4866 | { |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4867 | union kvm_mmu_role role; |
| 4868 | |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4869 | if (tdp_enabled) |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4870 | role = kvm_calc_tdp_mmu_root_page_role(vcpu, true); |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4871 | else |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4872 | role = kvm_calc_shadow_mmu_root_page_role(vcpu, true); |
| 4873 | |
| 4874 | return role.base; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4875 | } |
Dong, Eddie | 489f1d6 | 2008-01-07 11:14:20 +0200 | [diff] [blame] | 4876 | |
Sean Christopherson | 49c6f87 | 2021-06-22 10:56:51 -0700 | [diff] [blame] | 4877 | void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu) |
| 4878 | { |
| 4879 | /* |
| 4880 | * Invalidate all MMU roles to force them to reinitialize as CPUID |
| 4881 | * information is factored into reserved bit calculations. |
| 4882 | */ |
| 4883 | vcpu->arch.root_mmu.mmu_role.ext.valid = 0; |
| 4884 | vcpu->arch.guest_mmu.mmu_role.ext.valid = 0; |
| 4885 | vcpu->arch.nested_mmu.mmu_role.ext.valid = 0; |
| 4886 | kvm_mmu_reset_context(vcpu); |
Sean Christopherson | 63f5a19 | 2021-06-22 10:56:52 -0700 | [diff] [blame] | 4887 | |
| 4888 | /* |
| 4889 | * KVM does not correctly handle changing guest CPUID after KVM_RUN, as |
| 4890 | * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't |
| 4891 | * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page |
| 4892 | * faults due to reusing SPs/SPTEs. Alert userspace, but otherwise |
| 4893 | * sweep the problem under the rug. |
| 4894 | * |
| 4895 | * KVM's horrific CPUID ABI makes the problem all but impossible to |
| 4896 | * solve, as correctly handling multiple vCPU models (with respect to |
| 4897 | * paging and physical address properties) in a single VM would require |
| 4898 | * tracking all relevant CPUID information in kvm_mmu_page_role. That |
| 4899 | * is very undesirable as it would double the memory requirements for |
| 4900 | * gfn_track (see struct kvm_mmu_page_role comments), and in practice |
| 4901 | * no sane VMM mucks with the core vCPU model on the fly. |
| 4902 | */ |
| 4903 | if (vcpu->arch.last_vmentry_cpu != -1) { |
| 4904 | pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n"); |
| 4905 | pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n"); |
| 4906 | } |
Sean Christopherson | 49c6f87 | 2021-06-22 10:56:51 -0700 | [diff] [blame] | 4907 | } |
| 4908 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4909 | void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 4910 | { |
Paolo Bonzini | 95f93af | 2013-10-02 16:56:12 +0200 | [diff] [blame] | 4911 | kvm_mmu_unload(vcpu); |
Sean Christopherson | c906066 | 2021-06-09 16:42:33 -0700 | [diff] [blame] | 4912 | kvm_init_mmu(vcpu); |
Eddie Dong | 8668a3c | 2007-10-10 14:26:45 +0800 | [diff] [blame] | 4913 | } |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 4914 | EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); |
| 4915 | |
| 4916 | int kvm_mmu_load(struct kvm_vcpu *vcpu) |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 4917 | { |
| 4918 | int r; |
Avi Kivity | e2dec93 | 2007-01-05 16:36:54 -0800 | [diff] [blame] | 4919 | |
Sean Christopherson | 378f5cd | 2020-07-02 19:35:36 -0700 | [diff] [blame] | 4920 | r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map); |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 4921 | if (r) |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 4922 | goto out; |
Sean Christopherson | 748e52b | 2021-03-04 17:10:49 -0800 | [diff] [blame] | 4923 | r = mmu_alloc_special_roots(vcpu); |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 4924 | if (r) |
| 4925 | goto out; |
Paolo Bonzini | 4a38162 | 2021-04-08 08:10:25 -0400 | [diff] [blame] | 4926 | if (vcpu->arch.mmu->direct_map) |
Sean Christopherson | 6e6ec58 | 2021-03-04 17:10:50 -0800 | [diff] [blame] | 4927 | r = mmu_alloc_direct_roots(vcpu); |
| 4928 | else |
| 4929 | r = mmu_alloc_shadow_roots(vcpu); |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 4930 | if (r) |
| 4931 | goto out; |
Sean Christopherson | a91f387 | 2021-03-04 17:11:00 -0800 | [diff] [blame] | 4932 | |
| 4933 | kvm_mmu_sync_roots(vcpu); |
| 4934 | |
Paolo Bonzini | 727a7e2 | 2020-03-05 03:52:50 -0500 | [diff] [blame] | 4935 | kvm_mmu_load_pgd(vcpu); |
Jason Baron | b3646477 | 2021-01-14 22:27:56 -0500 | [diff] [blame] | 4936 | static_call(kvm_x86_tlb_flush_current)(vcpu); |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 4937 | out: |
| 4938 | return r; |
| 4939 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4940 | |
| 4941 | void kvm_mmu_unload(struct kvm_vcpu *vcpu) |
| 4942 | { |
Vitaly Kuznetsov | 14c07ad | 2018-10-08 21:28:08 +0200 | [diff] [blame] | 4943 | kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); |
| 4944 | WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa)); |
| 4945 | kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); |
| 4946 | WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4947 | } |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 4948 | |
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 4949 | static bool need_remote_flush(u64 old, u64 new) |
| 4950 | { |
| 4951 | if (!is_shadow_present_pte(old)) |
| 4952 | return false; |
| 4953 | if (!is_shadow_present_pte(new)) |
| 4954 | return true; |
| 4955 | if ((old ^ new) & PT64_BASE_ADDR_MASK) |
| 4956 | return true; |
Gleb Natapov | 5316622 | 2013-08-05 11:07:14 +0300 | [diff] [blame] | 4957 | old ^= shadow_nx_mask; |
| 4958 | new ^= shadow_nx_mask; |
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 4959 | return (old & ~new & PT64_PERM_MASK) != 0; |
| 4960 | } |
| 4961 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 4962 | static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 4963 | int *bytes) |
Avi Kivity | da4a00f | 2007-01-05 16:36:44 -0800 | [diff] [blame] | 4964 | { |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 4965 | u64 gentry = 0; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 4966 | int r; |
Avi Kivity | 72016f3 | 2010-03-15 13:59:53 +0200 | [diff] [blame] | 4967 | |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 4968 | /* |
| 4969 | * Assume that the pte write is to a page table of the same type |
Xiao Guangrong | 49b26e2 | 2011-03-04 19:00:00 +0800 | [diff] [blame] | 4970 | * as the current vcpu paging mode, since sptes are only updated |
| 4971 | * when the modes match. |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 4972 | */ |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 4973 | if (is_pae(vcpu) && *bytes == 4) { |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 4974 | /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 4975 | *gpa &= ~(gpa_t)7; |
| 4976 | *bytes = 8; |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 4977 | } |
| 4978 | |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 4979 | if (*bytes == 4 || *bytes == 8) { |
| 4980 | r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes); |
| 4981 | if (r) |
| 4982 | gentry = 0; |
Avi Kivity | 72016f3 | 2010-03-15 13:59:53 +0200 | [diff] [blame] | 4983 | } |
| 4984 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 4985 | return gentry; |
| 4986 | } |
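/*
 * Worked example (illustrative values): a 32-bit PAE guest writes the low
 * 4 bytes of a gpte at gpa 0x1004.  The fetch above rounds the gpa down
 * with *gpa &= ~(gpa_t)7 and widens the read:
 *
 *	gpa = 0x1004, bytes = 4   ->   gpa = 0x1000, bytes = 8
 *
 * so the full 64-bit gpte is read atomically rather than only half of it.
 */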
| 4987 | |
| 4988 | /* |
| 4989 | * If we're seeing too many writes to a page, it may no longer be a page table, |
| 4990 | * or we may be forking, in which case it is better to unmap the page. |
| 4991 | */ |
Xiao Guangrong | a138fe7 | 2011-12-16 18:18:10 +0800 | [diff] [blame] | 4992 | static bool detect_write_flooding(struct kvm_mmu_page *sp) |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 4993 | { |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 4994 | /* |
| 4995 | * Skip write-flooding detection for sps whose level is 1, because |
| 4996 | * they can become unsync, in which case the guest page is not write-protected. |
| 4997 | */ |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 4998 | if (sp->role.level == PG_LEVEL_4K) |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 4999 | return false; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5000 | |
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 5001 | atomic_inc(&sp->write_flooding_count); |
| 5002 | return atomic_read(&sp->write_flooding_count) >= 3; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5003 | } |
| 5004 | |
| 5005 | /* |
| 5006 | * Misaligned accesses are too much trouble to fix up; also, they usually |
| 5007 | * indicate a page is not used as a page table. |
| 5008 | */ |
| 5009 | static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, |
| 5010 | int bytes) |
| 5011 | { |
| 5012 | unsigned offset, pte_size, misaligned; |
| 5013 | |
| 5014 | pgprintk("misaligned: gpa %llx bytes %d role %x\n", |
| 5015 | gpa, bytes, sp->role.word); |
| 5016 | |
| 5017 | offset = offset_in_page(gpa); |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5018 | pte_size = sp->role.gpte_is_8_bytes ? 8 : 4; |
Xiao Guangrong | 5d9ca30 | 2011-09-22 16:57:55 +0800 | [diff] [blame] | 5019 | |
| 5020 | /* |
| 5021 | * Sometimes the OS only writes the last byte to update status |
| 5022 | * bits; for example, Linux's clear_bit() uses the andb instruction. |
| 5023 | */ |
| 5024 | if (!(offset & (pte_size - 1)) && bytes == 1) |
| 5025 | return false; |
| 5026 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5027 | misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); |
| 5028 | misaligned |= bytes < 4; |
| 5029 | |
| 5030 | return misaligned; |
| 5031 | } |
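/*
 * Worked example (illustrative values) for the check above, with 8-byte
 * gptes (pte_size = 8):
 *
 *	gpa offset 0x10, bytes 8:  (0x10 ^ 0x17) & ~7 = 0  -> aligned
 *	gpa offset 0x14, bytes 8:  (0x14 ^ 0x1b) & ~7 = 8  -> misaligned
 *	gpa offset 0x10, bytes 2:  bytes < 4               -> misaligned
 *
 * i.e. a write is treated as misaligned if it straddles a gpte boundary
 * or is too small to be a normal gpte update (the aligned 1-byte
 * status-bit update handled earlier being the exception).
 */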
| 5032 | |
| 5033 | static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) |
| 5034 | { |
| 5035 | unsigned page_offset, quadrant; |
| 5036 | u64 *spte; |
| 5037 | int level; |
| 5038 | |
| 5039 | page_offset = offset_in_page(gpa); |
| 5040 | level = sp->role.level; |
| 5041 | *nspte = 1; |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5042 | if (!sp->role.gpte_is_8_bytes) { |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5043 | page_offset <<= 1; /* 32->64 */ |
| 5044 | /* |
| 5045 | * A 32-bit pde maps 4MB while the shadow pdes map |
| 5046 | * only 2MB. So we need to double the offset again |
| 5047 | * and zap two pdes instead of one. |
| 5048 | */ |
| 5049 | if (level == PT32_ROOT_LEVEL) { |
| 5050 | page_offset &= ~7; /* kill rounding error */ |
| 5051 | page_offset <<= 1; |
| 5052 | *nspte = 2; |
| 5053 | } |
| 5054 | quadrant = page_offset >> PAGE_SHIFT; |
| 5055 | page_offset &= ~PAGE_MASK; |
| 5056 | if (quadrant != sp->role.quadrant) |
| 5057 | return NULL; |
| 5058 | } |
| 5059 | |
| 5060 | spte = &sp->spt[page_offset / sizeof(*spte)]; |
| 5061 | return spte; |
| 5062 | } |
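/*
 * Worked example (illustrative values) for the 32-bit gpte case above:
 * the guest writes a 4-byte gpte at page offset 0x804 (gpte index 0x201).
 * Shadow sptes are 8 bytes, so the offset is doubled to 0x1008, giving
 * quadrant 1 and an in-page offset of 0x008, i.e. spte index 1 of the
 * shadow page covering the upper half of the 1024-entry guest table.
 * If the sp's role.quadrant is 0 instead, the write does not touch this
 * sp and NULL is returned.
 */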
| 5063 | |
Xiao Guangrong | 13d268c | 2016-02-24 17:51:16 +0800 | [diff] [blame] | 5064 | static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, |
Jike Song | d126363 | 2016-10-25 15:50:42 +0800 | [diff] [blame] | 5065 | const u8 *new, int bytes, |
| 5066 | struct kvm_page_track_notifier_node *node) |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5067 | { |
| 5068 | gfn_t gfn = gpa >> PAGE_SHIFT; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5069 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5070 | LIST_HEAD(invalid_list); |
| 5071 | u64 entry, gentry, *spte; |
| 5072 | int npte; |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5073 | bool remote_flush, local_flush; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5074 | |
| 5075 | /* |
| 5076 | * If we don't have indirect shadow pages, it means no page is |
| 5077 | * write-protected, so we can simply exit. |
| 5078 | */ |
Mark Rutland | 6aa7de0 | 2017-10-23 14:07:29 -0700 | [diff] [blame] | 5079 | if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5080 | return; |
| 5081 | |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5082 | remote_flush = local_flush = false; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5083 | |
| 5084 | pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); |
| 5085 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5086 | /* |
| 5087 | * No need to check whether the memory allocation succeeded, |
Ingo Molnar | d9f6e12 | 2021-03-18 15:28:01 +0100 | [diff] [blame] | 5088 | * since pte prefetch is skipped if the cache does not have |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5089 | * enough objects. |
| 5090 | */ |
Sean Christopherson | 378f5cd | 2020-07-02 19:35:36 -0700 | [diff] [blame] | 5091 | mmu_topup_memory_caches(vcpu, true); |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5092 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5093 | write_lock(&vcpu->kvm->mmu_lock); |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 5094 | |
| 5095 | gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); |
| 5096 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5097 | ++vcpu->kvm->stat.mmu_pte_write; |
Xiao Guangrong | 0375f7f | 2011-11-28 20:41:00 +0800 | [diff] [blame] | 5098 | kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5099 | |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 5100 | for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 5101 | if (detect_write_misaligned(sp, gpa, bytes) || |
Xiao Guangrong | a138fe7 | 2011-12-16 18:18:10 +0800 | [diff] [blame] | 5102 | detect_write_flooding(sp)) { |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5103 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); |
Avi Kivity | 4cee576 | 2007-11-18 16:37:07 +0200 | [diff] [blame] | 5104 | ++vcpu->kvm->stat.mmu_flooded; |
Avi Kivity | 0e7bc4b | 2007-01-05 16:36:48 -0800 | [diff] [blame] | 5105 | continue; |
| 5106 | } |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5107 | |
| 5108 | spte = get_written_sptes(sp, gpa, &npte); |
| 5109 | if (!spte) |
| 5110 | continue; |
| 5111 | |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 5112 | local_flush = true; |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 5113 | while (npte--) { |
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 5114 | entry = *spte; |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 5115 | mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL); |
Sean Christopherson | c5e2184 | 2021-01-14 16:40:51 -0800 | [diff] [blame] | 5116 | if (gentry && sp->role.level != PG_LEVEL_4K) |
| 5117 | ++vcpu->kvm->stat.mmu_pde_zapped; |
Gleb Natapov | 9bb4f6b | 2013-01-30 16:45:01 +0200 | [diff] [blame] | 5118 | if (need_remote_flush(entry, *spte)) |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 5119 | remote_flush = true; |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 5120 | ++spte; |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5121 | } |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5122 | } |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5123 | kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); |
Xiao Guangrong | 0375f7f | 2011-11-28 20:41:00 +0800 | [diff] [blame] | 5124 | kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5125 | write_unlock(&vcpu->kvm->mmu_lock); |
Avi Kivity | da4a00f | 2007-01-05 16:36:44 -0800 | [diff] [blame] | 5126 | } |
| 5127 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5128 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, |
Andre Przywara | dc25e89 | 2010-12-21 11:12:07 +0100 | [diff] [blame] | 5129 | void *insn, int insn_len) |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5130 | { |
Sean Christopherson | 92daa48 | 2020-02-18 15:03:08 -0800 | [diff] [blame] | 5131 | int r, emulation_type = EMULTYPE_PF; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5132 | bool direct = vcpu->arch.mmu->direct_map; |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5133 | |
Sean Christopherson | 6948199 | 2019-12-06 15:57:29 -0800 | [diff] [blame] | 5134 | if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa))) |
Sean Christopherson | ddce620 | 2019-12-06 15:57:27 -0800 | [diff] [blame] | 5135 | return RET_PF_RETRY; |
| 5136 | |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 5137 | r = RET_PF_INVALID; |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5138 | if (unlikely(error_code & PFERR_RSVD_MASK)) { |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5139 | r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct); |
Sean Christopherson | 472faff | 2018-08-23 13:56:50 -0700 | [diff] [blame] | 5140 | if (r == RET_PF_EMULATE) |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5141 | goto emulate; |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5142 | } |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5143 | |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 5144 | if (r == RET_PF_INVALID) { |
Sean Christopherson | 7a02674 | 2020-02-06 14:14:34 -0800 | [diff] [blame] | 5145 | r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, |
| 5146 | lower_32_bits(error_code), false); |
Sean Christopherson | 7b367bc | 2020-09-23 15:04:22 -0700 | [diff] [blame] | 5147 | if (WARN_ON_ONCE(r == RET_PF_INVALID)) |
| 5148 | return -EIO; |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 5149 | } |
| 5150 | |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5151 | if (r < 0) |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5152 | return r; |
Sean Christopherson | 83a2ba4 | 2020-09-23 15:04:23 -0700 | [diff] [blame] | 5153 | if (r != RET_PF_EMULATE) |
| 5154 | return 1; |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5155 | |
Tom Lendacky | 1472775 | 2016-11-23 12:01:38 -0500 | [diff] [blame] | 5156 | /* |
| 5157 | * Before emulating the instruction, check if the error code |
| 5158 | * was due to a RO violation while translating the guest page. |
| 5159 | * This can occur when using nested virtualization with nested |
| 5160 | * paging in both guests. If true, we simply unprotect the page |
| 5161 | * and resume the guest. |
Tom Lendacky | 1472775 | 2016-11-23 12:01:38 -0500 | [diff] [blame] | 5162 | */ |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5163 | if (vcpu->arch.mmu->direct_map && |
Paolo Bonzini | eebed24 | 2016-11-28 14:39:58 +0100 | [diff] [blame] | 5164 | (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) { |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5165 | kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)); |
Tom Lendacky | 1472775 | 2016-11-23 12:01:38 -0500 | [diff] [blame] | 5166 | return 1; |
| 5167 | } |
| 5168 | |
Sean Christopherson | 472faff | 2018-08-23 13:56:50 -0700 | [diff] [blame] | 5169 | /* |
| 5170 | * vcpu->arch.mmu->page_fault returned RET_PF_EMULATE, but we can still |
| 5171 | * optimistically try to just unprotect the page and let the processor |
| 5172 | * re-execute the instruction that caused the page fault. Do not allow |
| 5173 | * retrying MMIO emulation, as it's not only pointless but could also |
| 5174 | * cause us to enter an infinite loop because the processor will keep |
Sean Christopherson | 6c3dfeb | 2018-08-23 13:56:51 -0700 | [diff] [blame] | 5175 | * faulting on the non-existent MMIO address. Retrying an instruction |
| 5176 | * from a nested guest is also pointless and dangerous as we are only |
| 5177 | * explicitly shadowing L1's page tables, i.e. unprotecting something |
| 5178 | * for L1 isn't going to magically fix whatever issue caused L2 to fail. |
Sean Christopherson | 472faff | 2018-08-23 13:56:50 -0700 | [diff] [blame] | 5179 | */ |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5180 | if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu)) |
Sean Christopherson | 92daa48 | 2020-02-18 15:03:08 -0800 | [diff] [blame] | 5181 | emulation_type |= EMULTYPE_ALLOW_RETRY_PF; |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5182 | emulate: |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5183 | return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn, |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 5184 | insn_len); |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5185 | } |
| 5186 | EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); |
| 5187 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5188 | void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
| 5189 | gva_t gva, hpa_t root_hpa) |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 5190 | { |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5191 | int i; |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 5192 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5193 | /* It's actually a GPA for vcpu->arch.guest_mmu. */ |
| 5194 | if (mmu != &vcpu->arch.guest_mmu) { |
| 5195 | /* INVLPG on a non-canonical address is a NOP according to the SDM. */ |
| 5196 | if (is_noncanonical_address(gva, vcpu)) |
| 5197 | return; |
| 5198 | |
Jason Baron | b3646477 | 2021-01-14 22:27:56 -0500 | [diff] [blame] | 5199 | static_call(kvm_x86_tlb_flush_gva)(vcpu, gva); |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5200 | } |
| 5201 | |
| 5202 | if (!mmu->invlpg) |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5203 | return; |
| 5204 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5205 | if (root_hpa == INVALID_PAGE) { |
| 5206 | mmu->invlpg(vcpu, gva, mmu->root_hpa); |
Junaid Shahid | 956bf35 | 2018-06-27 14:59:18 -0700 | [diff] [blame] | 5207 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5208 | /* |
| 5209 | * INVLPG is required to invalidate any global mappings for the VA, |
| 5210 | * irrespective of PCID. Since it would take roughly the same amount |
| 5211 | * of work to determine whether any of the prev_root mappings of the VA |
| 5212 | * is marked global as it would to just sync it blindly, we might as |
| 5213 | * well always sync it. |
| 5214 | * |
| 5215 | * Mappings not reachable via the current cr3 or the prev_roots will be |
| 5216 | * synced when switching to that cr3, so nothing needs to be done here |
| 5217 | * for them. |
| 5218 | */ |
| 5219 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 5220 | if (VALID_PAGE(mmu->prev_roots[i].hpa)) |
| 5221 | mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); |
| 5222 | } else { |
| 5223 | mmu->invlpg(vcpu, gva, root_hpa); |
| 5224 | } |
| 5225 | } |
Junaid Shahid | 956bf35 | 2018-06-27 14:59:18 -0700 | [diff] [blame] | 5226 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5227 | void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) |
| 5228 | { |
| 5229 | kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE); |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 5230 | ++vcpu->stat.invlpg; |
| 5231 | } |
| 5232 | EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); |
| 5233 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5234 | |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5235 | void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) |
| 5236 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5237 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5238 | bool tlb_flush = false; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5239 | uint i; |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5240 | |
| 5241 | if (pcid == kvm_get_active_pcid(vcpu)) { |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 5242 | mmu->invlpg(vcpu, gva, mmu->root_hpa); |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5243 | tlb_flush = true; |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5244 | } |
| 5245 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5246 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { |
| 5247 | if (VALID_PAGE(mmu->prev_roots[i].hpa) && |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 5248 | pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) { |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5249 | mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); |
| 5250 | tlb_flush = true; |
| 5251 | } |
Junaid Shahid | 956bf35 | 2018-06-27 14:59:18 -0700 | [diff] [blame] | 5252 | } |
Junaid Shahid | ade61e2 | 2018-06-27 14:59:15 -0700 | [diff] [blame] | 5253 | |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5254 | if (tlb_flush) |
Jason Baron | b3646477 | 2021-01-14 22:27:56 -0500 | [diff] [blame] | 5255 | static_call(kvm_x86_tlb_flush_gva)(vcpu, gva); |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5256 | |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5257 | ++vcpu->stat.invlpg; |
| 5258 | |
| 5259 | /* |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5260 | * Mappings not reachable via the current cr3 or the prev_roots will be |
| 5261 | * synced when switching to that cr3, so nothing needs to be done here |
| 5262 | * for them. |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5263 | */ |
| 5264 | } |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5265 | |
Sean Christopherson | 8301305 | 2020-07-15 20:41:22 -0700 | [diff] [blame] | 5266 | void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level, |
| 5267 | int tdp_huge_page_level) |
Joerg Roedel | 1855267 | 2008-02-07 13:47:41 +0100 | [diff] [blame] | 5268 | { |
Sean Christopherson | bde7723 | 2020-03-02 15:57:02 -0800 | [diff] [blame] | 5269 | tdp_enabled = enable_tdp; |
Sean Christopherson | 8301305 | 2020-07-15 20:41:22 -0700 | [diff] [blame] | 5270 | max_tdp_level = tdp_max_root_level; |
Sean Christopherson | 703c335 | 2020-03-02 15:57:03 -0800 | [diff] [blame] | 5271 | |
| 5272 | /* |
Sean Christopherson | 1d92d2e | 2020-07-15 20:41:21 -0700 | [diff] [blame] | 5273 | * max_huge_page_level reflects KVM's MMU capabilities irrespective |
Sean Christopherson | 703c335 | 2020-03-02 15:57:03 -0800 | [diff] [blame] | 5274 | * of kernel support, e.g. KVM may be capable of using 1GB pages when |
| 5275 | * the kernel is not. But, KVM never creates a page size greater than |
| 5276 | * what is used by the kernel for any given HVA, i.e. the kernel's |
| 5277 | * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust(). |
| 5278 | */ |
| 5279 | if (tdp_enabled) |
Sean Christopherson | 1d92d2e | 2020-07-15 20:41:21 -0700 | [diff] [blame] | 5280 | max_huge_page_level = tdp_huge_page_level; |
Sean Christopherson | 703c335 | 2020-03-02 15:57:03 -0800 | [diff] [blame] | 5281 | else if (boot_cpu_has(X86_FEATURE_GBPAGES)) |
Sean Christopherson | 1d92d2e | 2020-07-15 20:41:21 -0700 | [diff] [blame] | 5282 | max_huge_page_level = PG_LEVEL_1G; |
Sean Christopherson | 703c335 | 2020-03-02 15:57:03 -0800 | [diff] [blame] | 5283 | else |
Sean Christopherson | 1d92d2e | 2020-07-15 20:41:21 -0700 | [diff] [blame] | 5284 | max_huge_page_level = PG_LEVEL_2M; |
Joerg Roedel | 1855267 | 2008-02-07 13:47:41 +0100 | [diff] [blame] | 5285 | } |
Sean Christopherson | bde7723 | 2020-03-02 15:57:02 -0800 | [diff] [blame] | 5286 | EXPORT_SYMBOL_GPL(kvm_configure_mmu); |
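/*
 * Illustrative sketch only (not an actual call site): a TDP-capable
 * vendor module would typically call this from its hardware-setup path,
 * e.g. to enable TDP with 4-level roots and 1GiB huge pages:
 *
 *	kvm_configure_mmu(true, 4, PG_LEVEL_1G);
 *
 * With TDP disabled, the huge-page argument is irrelevant; the limit is
 * derived above from host GBPAGES support instead.
 */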
Xiao Guangrong | 13d268c | 2016-02-24 17:51:16 +0800 | [diff] [blame] | 5287 | |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5288 | /* The return value indicates whether a TLB flush on all vcpus is needed. */
Sean Christopherson | 0a234f5 | 2021-02-12 16:50:05 -0800 | [diff] [blame] | 5289 | typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
| 5290 | struct kvm_memory_slot *slot); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5291 | |
| 5292 | /* The caller must hold mmu_lock before calling this function. */
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5293 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5294 | slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5295 | slot_level_handler fn, int start_level, int end_level, |
Sean Christopherson | 1a61b7d | 2021-03-25 19:19:43 -0700 | [diff] [blame] | 5296 | gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield, |
| 5297 | bool flush) |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5298 | { |
| 5299 | struct slot_rmap_walk_iterator iterator; |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5300 | |
| 5301 | for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn, |
| 5302 | end_gfn, &iterator) { |
| 5303 | if (iterator.rmap) |
Sean Christopherson | 0a234f5 | 2021-02-12 16:50:05 -0800 | [diff] [blame] | 5304 | flush |= fn(kvm, iterator.rmap, memslot); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5305 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5306 | if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { |
Sean Christopherson | 302695a | 2021-03-25 19:19:41 -0700 | [diff] [blame] | 5307 | if (flush && flush_on_yield) { |
Ben Gardon | f285c63 | 2019-03-12 11:45:59 -0700 | [diff] [blame] | 5308 | kvm_flush_remote_tlbs_with_address(kvm, |
| 5309 | start_gfn, |
| 5310 | iterator.gfn - start_gfn + 1); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5311 | flush = false; |
| 5312 | } |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5313 | cond_resched_rwlock_write(&kvm->mmu_lock); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5314 | } |
| 5315 | } |
| 5316 | |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5317 | return flush; |
| 5318 | } |
| 5319 | |
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5320 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5321 | slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5322 | slot_level_handler fn, int start_level, int end_level, |
Sean Christopherson | 302695a | 2021-03-25 19:19:41 -0700 | [diff] [blame] | 5323 | bool flush_on_yield) |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5324 | { |
| 5325 | return slot_handle_level_range(kvm, memslot, fn, start_level, |
| 5326 | end_level, memslot->base_gfn, |
| 5327 | memslot->base_gfn + memslot->npages - 1, |
Sean Christopherson | 1a61b7d | 2021-03-25 19:19:43 -0700 | [diff] [blame] | 5328 | flush_on_yield, false); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5329 | } |
| 5330 | |
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5331 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5332 | slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, |
Sean Christopherson | 302695a | 2021-03-25 19:19:41 -0700 | [diff] [blame] | 5333 | slot_level_handler fn, bool flush_on_yield) |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5334 | { |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 5335 | return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K, |
Sean Christopherson | 302695a | 2021-03-25 19:19:41 -0700 | [diff] [blame] | 5336 | PG_LEVEL_4K, flush_on_yield); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5337 | } |
| 5338 | |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5339 | static void free_mmu_pages(struct kvm_mmu *mmu) |
Takuya Yoshikawa | b99db1d | 2013-01-08 19:44:48 +0900 | [diff] [blame] | 5340 | { |
Sean Christopherson | 4a98623 | 2021-03-09 14:42:07 -0800 | [diff] [blame] | 5341 | if (!tdp_enabled && mmu->pae_root) |
| 5342 | set_memory_encrypted((unsigned long)mmu->pae_root, 1); |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5343 | free_page((unsigned long)mmu->pae_root); |
Sean Christopherson | 03ca458 | 2021-05-05 13:42:21 -0700 | [diff] [blame] | 5344 | free_page((unsigned long)mmu->pml4_root); |
Takuya Yoshikawa | 6b81b05 | 2013-01-08 19:47:33 +0900 | [diff] [blame] | 5345 | } |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5346 | |
Sean Christopherson | 04d28e3 | 2020-09-23 09:33:14 -0700 | [diff] [blame] | 5347 | static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) |
Avi Kivity | 8234b22 | 2010-12-27 12:08:45 +0200 | [diff] [blame] | 5348 | { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5349 | struct page *page; |
Takuya Yoshikawa | b99db1d | 2013-01-08 19:44:48 +0900 | [diff] [blame] | 5350 | int i; |
Takuya Yoshikawa | 9d1beef | 2013-01-08 19:46:48 +0900 | [diff] [blame] | 5351 | |
Sean Christopherson | 04d28e3 | 2020-09-23 09:33:14 -0700 | [diff] [blame] | 5352 | mmu->root_hpa = INVALID_PAGE; |
| 5353 | mmu->root_pgd = 0; |
| 5354 | mmu->translate_gpa = translate_gpa; |
| 5355 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 5356 | mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; |
| 5357 | |
Sean Christopherson | b6b80c7 | 2019-06-13 10:22:23 -0700 | [diff] [blame] | 5358 | /* |
| 5359 | * When using PAE paging, the four PDPTEs are treated as 'root' pages, |
| 5360 | * while the PDP table is a per-vCPU construct that's allocated at MMU |
| 5361 | * creation. When emulating 32-bit mode, cr3 is only 32 bits even on |
| 5362 | * x86_64. Therefore we need to allocate the PDP table in the first |
Sean Christopherson | 04d4555 | 2021-03-04 17:10:46 -0800 | [diff] [blame] | 5363 | * 4GB of memory, which happens to fit the DMA32 zone. TDP paging |
| 5364 | * generally doesn't use PAE paging and can skip allocating the PDP |
| 5365 | * table. The main exception, handled here, is SVM's 32-bit NPT. The |
| 5366 | * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit |
| 5367 | * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots(). |
Sean Christopherson | b6b80c7 | 2019-06-13 10:22:23 -0700 | [diff] [blame] | 5368 | */ |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 5369 | if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL) |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5370 | return 0; |
| 5371 | |
Ben Gardon | 254272c | 2019-02-11 11:02:50 -0800 | [diff] [blame] | 5372 | page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32); |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5373 | if (!page) |
| 5374 | return -ENOMEM; |
| 5375 | |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5376 | mmu->pae_root = page_address(page); |
Sean Christopherson | 4a98623 | 2021-03-09 14:42:07 -0800 | [diff] [blame] | 5377 | |
| 5378 | /* |
| 5379 | * CR3 is only 32 bits when PAE paging is used, thus it's impossible to |
| 5380 | * get the CPU to treat the PDPTEs as encrypted. Decrypt the page so |
| 5381 | * that KVM's writes and the CPU's reads get along. Note, this is |
| 5382 | * only necessary when using shadow paging, as 64-bit NPT can get at |
| 5383 | * the C-bit even when shadowing 32-bit NPT, and SME isn't supported |
| 5384 | * by 32-bit kernels (when KVM itself uses 32-bit NPT). |
| 5385 | */ |
| 5386 | if (!tdp_enabled) |
| 5387 | set_memory_decrypted((unsigned long)mmu->pae_root, 1); |
| 5388 | else |
| 5389 | WARN_ON_ONCE(shadow_me_mask); |
| 5390 | |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5391 | for (i = 0; i < 4; ++i) |
Sean Christopherson | c834e5e4 | 2021-03-09 14:42:06 -0800 | [diff] [blame] | 5392 | mmu->pae_root[i] = INVALID_PAE_ROOT; |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5393 | |
| 5394 | return 0; |
| 5395 | } |
| 5396 | |
Kai Huang | d91ffee | 2015-01-12 15:28:54 +0800 | [diff] [blame] | 5397 | int kvm_mmu_create(struct kvm_vcpu *vcpu) |
| 5398 | { |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5399 | int ret; |
Avi Kivity | 37a7d8b | 2007-01-05 16:36:56 -0800 | [diff] [blame] | 5400 | |
Sean Christopherson | 5962bfb | 2020-07-02 19:35:25 -0700 | [diff] [blame] | 5401 | vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache; |
Sean Christopherson | 5f6078f | 2020-07-02 19:35:34 -0700 | [diff] [blame] | 5402 | vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO; |
| 5403 | |
Sean Christopherson | 5962bfb | 2020-07-02 19:35:25 -0700 | [diff] [blame] | 5404 | vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache; |
Sean Christopherson | 5f6078f | 2020-07-02 19:35:34 -0700 | [diff] [blame] | 5405 | vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO; |
Sean Christopherson | 5962bfb | 2020-07-02 19:35:25 -0700 | [diff] [blame] | 5406 | |
Sean Christopherson | 9688088 | 2020-07-02 19:35:35 -0700 | [diff] [blame] | 5407 | vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO; |
| 5408 | |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5409 | vcpu->arch.mmu = &vcpu->arch.root_mmu; |
| 5410 | vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; |
| 5411 | |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5412 | vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5413 | |
Sean Christopherson | 04d28e3 | 2020-09-23 09:33:14 -0700 | [diff] [blame] | 5414 | ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu); |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5415 | if (ret) |
| 5416 | return ret; |
| 5417 | |
Sean Christopherson | 04d28e3 | 2020-09-23 09:33:14 -0700 | [diff] [blame] | 5418 | ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu); |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5419 | if (ret) |
| 5420 | goto fail_allocate_root; |
| 5421 | |
| 5422 | return ret; |
| 5423 | fail_allocate_root: |
| 5424 | free_mmu_pages(&vcpu->arch.guest_mmu); |
| 5425 | return ret; |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5426 | } |
| 5427 | |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5428 | #define BATCH_ZAP_PAGES 10 |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5429 | static void kvm_zap_obsolete_pages(struct kvm *kvm) |
| 5430 | { |
| 5431 | struct kvm_mmu_page *sp, *node; |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5432 | int nr_zapped, batch = 0; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5433 | |
| 5434 | restart: |
| 5435 | list_for_each_entry_safe_reverse(sp, node, |
| 5436 | &kvm->arch.active_mmu_pages, link) { |
| 5437 | /* |
| 5438 | * No obsolete valid page exists before a newly created page |
| 5439 | * since active_mmu_pages is a FIFO list. |
| 5440 | */ |
| 5441 | if (!is_obsolete_sp(kvm, sp)) |
| 5442 | break; |
| 5443 | |
| 5444 | /* |
Sean Christopherson | f95eec9 | 2020-06-23 12:35:39 -0700 | [diff] [blame] | 5445 | * Invalid pages should never land back on the list of active |
| 5446 | * pages. Skip the bogus page, otherwise we'll get stuck in an |
| 5447 | * infinite loop if the page gets put back on the list (again). |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5448 | */ |
Sean Christopherson | f95eec9 | 2020-06-23 12:35:39 -0700 | [diff] [blame] | 5449 | if (WARN_ON(sp->role.invalid)) |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5450 | continue; |
| 5451 | |
Sean Christopherson | 4506ecf | 2019-09-12 19:46:08 -0700 | [diff] [blame] | 5452 | /* |
| 5453 | * No need to flush the TLB since we're only zapping shadow |
| 5454 | * pages with an obsolete generation number and all vCPUs have |
| 5455 | * loaded a new root, i.e. the shadow pages being zapped cannot |
| 5456 | * be in active use by the guest. |
| 5457 | */ |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5458 | if (batch >= BATCH_ZAP_PAGES && |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5459 | cond_resched_rwlock_write(&kvm->mmu_lock)) { |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5460 | batch = 0; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5461 | goto restart; |
| 5462 | } |
| 5463 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5464 | if (__kvm_mmu_prepare_zap_page(kvm, sp, |
| 5465 | &kvm->arch.zapped_obsolete_pages, &nr_zapped)) { |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5466 | batch += nr_zapped; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5467 | goto restart; |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5468 | } |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5469 | } |
| 5470 | |
Sean Christopherson | 4506ecf | 2019-09-12 19:46:08 -0700 | [diff] [blame] | 5471 | /* |
| 5472 | * Trigger a remote TLB flush before freeing the page tables to ensure |
| 5473 | * KVM is not in the middle of a lockless shadow page table walk, which |
| 5474 | * may reference the pages. |
| 5475 | */ |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5476 | kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5477 | } |
| 5478 | |
| 5479 | /* |
| 5480 | * Fast invalidate all shadow pages and use lock-break technique |
| 5481 | * to zap obsolete pages. |
| 5482 | * |
| 5483 | * It's required when a memslot is being deleted or the VM is being |
| 5484 | * destroyed; in these cases, we must ensure that the KVM MMU does |
| 5485 | * not use any resource of the slot being deleted, or of any slot, |
| 5486 | * after this function returns. |
| 5487 | */ |
| 5488 | static void kvm_mmu_zap_all_fast(struct kvm *kvm) |
| 5489 | { |
Sean Christopherson | ca333ad | 2019-09-12 19:46:11 -0700 | [diff] [blame] | 5490 | lockdep_assert_held(&kvm->slots_lock); |
| 5491 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5492 | write_lock(&kvm->mmu_lock); |
Sean Christopherson | 14a3c4f | 2019-09-12 19:46:06 -0700 | [diff] [blame] | 5493 | trace_kvm_mmu_zap_all_fast(kvm); |
Sean Christopherson | ca333ad | 2019-09-12 19:46:11 -0700 | [diff] [blame] | 5494 | |
| 5495 | /* |
| 5496 | * Toggle mmu_valid_gen between '0' and '1'. Because slots_lock is |
| 5497 | * held for the entire duration of zapping obsolete pages, it's |
| 5498 | * impossible for there to be multiple invalid generations associated |
| 5499 | * with *valid* shadow pages at any given time, i.e. there is exactly |
| 5500 | * one valid generation and (at most) one invalid generation. |
| 5501 | */ |
| 5502 | kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5503 | |
Ben Gardon | b7cccd39 | 2021-04-01 16:37:35 -0700 | [diff] [blame] | 5504 | /* |
| 5505 | * In order to ensure all threads see this change when handling the |
| 5506 | * MMU reload signal, this must happen in the same critical section |
| 5507 | * as kvm_reload_remote_mmus, and before kvm_zap_obsolete_pages as |
| 5508 | * kvm_zap_obsolete_pages could drop the MMU lock and yield. |
| 5509 | */ |
| 5510 | if (is_tdp_mmu_enabled(kvm)) |
| 5511 | kvm_tdp_mmu_invalidate_all_roots(kvm); |
| 5512 | |
Sean Christopherson | 4506ecf | 2019-09-12 19:46:08 -0700 | [diff] [blame] | 5513 | /* |
| 5514 | * Notify all vcpus to reload their shadow page tables and flush TLBs. |
| 5515 | * Then all vcpus will switch to a new shadow page table with the new |
| 5516 | * mmu_valid_gen. |
| 5517 | * |
| 5518 | * Note: we need to do this under the protection of mmu_lock, |
| 5519 | * otherwise, vcpu would purge shadow page but miss tlb flush. |
| 5520 | */ |
| 5521 | kvm_reload_remote_mmus(kvm); |
| 5522 | |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5523 | kvm_zap_obsolete_pages(kvm); |
Ben Gardon | faaf05b0 | 2020-10-14 11:26:47 -0700 | [diff] [blame] | 5524 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5525 | write_unlock(&kvm->mmu_lock); |
Ben Gardon | 4c6654b | 2021-04-01 16:37:36 -0700 | [diff] [blame] | 5526 | |
| 5527 | if (is_tdp_mmu_enabled(kvm)) { |
| 5528 | read_lock(&kvm->mmu_lock); |
| 5529 | kvm_tdp_mmu_zap_invalidated_roots(kvm); |
| 5530 | read_unlock(&kvm->mmu_lock); |
| 5531 | } |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5532 | } |
| 5533 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5534 | static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) |
| 5535 | { |
| 5536 | return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); |
| 5537 | } |
| 5538 | |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5539 | static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, |
| 5540 | struct kvm_memory_slot *slot, |
| 5541 | struct kvm_page_track_notifier_node *node) |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5542 | { |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5543 | kvm_mmu_zap_all_fast(kvm); |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5544 | } |
| 5545 | |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5546 | void kvm_mmu_init_vm(struct kvm *kvm) |
| 5547 | { |
| 5548 | struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; |
| 5549 | |
Ben Gardon | d501f74 | 2021-05-18 10:34:14 -0700 | [diff] [blame] | 5550 | if (!kvm_mmu_init_tdp_mmu(kvm)) |
| 5551 | /* |
| 5552 | * No smp_load/store wrappers needed here as we are in |
| 5553 | * VM init and there cannot be any memslots / other threads |
| 5554 | * accessing this struct kvm yet. |
| 5555 | */ |
| 5556 | kvm->arch.memslots_have_rmaps = true; |
Ben Gardon | a255740 | 2021-05-18 10:34:12 -0700 | [diff] [blame] | 5557 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5558 | node->track_write = kvm_mmu_pte_write; |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5559 | node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; |
| 5560 | kvm_page_track_register_notifier(kvm, node); |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5561 | } |
| 5562 | |
| 5563 | void kvm_mmu_uninit_vm(struct kvm *kvm) |
| 5564 | { |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5565 | struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5566 | |
| 5567 | kvm_page_track_unregister_notifier(kvm, node); |
Ben Gardon | fe5db27 | 2020-10-14 11:26:43 -0700 | [diff] [blame] | 5568 | |
| 5569 | kvm_mmu_uninit_tdp_mmu(kvm); |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5570 | } |
| 5571 | |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5572 | void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5573 | { |
| 5574 | struct kvm_memslots *slots; |
| 5575 | struct kvm_memory_slot *memslot; |
| 5576 | int i; |
Sean Christopherson | 1a61b7d | 2021-03-25 19:19:43 -0700 | [diff] [blame] | 5577 | bool flush = false; |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5578 | |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 5579 | if (kvm_memslots_have_rmaps(kvm)) { |
| 5580 | write_lock(&kvm->mmu_lock); |
| 5581 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
| 5582 | slots = __kvm_memslots(kvm, i); |
| 5583 | kvm_for_each_memslot(memslot, slots) { |
| 5584 | gfn_t start, end; |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5585 | |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 5586 | start = max(gfn_start, memslot->base_gfn); |
| 5587 | end = min(gfn_end, memslot->base_gfn + memslot->npages); |
| 5588 | if (start >= end) |
| 5589 | continue; |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5590 | |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 5591 | flush = slot_handle_level_range(kvm, memslot, |
| 5592 | kvm_zap_rmapp, PG_LEVEL_4K, |
| 5593 | KVM_MAX_HUGEPAGE_LEVEL, start, |
| 5594 | end - 1, true, flush); |
| 5595 | } |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5596 | } |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 5597 | if (flush) |
| 5598 | kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end); |
| 5599 | write_unlock(&kvm->mmu_lock); |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5600 | } |
| 5601 | |
Ben Gardon | 6103bc0 | 2021-04-01 16:37:32 -0700 | [diff] [blame] | 5602 | if (is_tdp_mmu_enabled(kvm)) { |
| 5603 | flush = false; |
| 5604 | |
| 5605 | read_lock(&kvm->mmu_lock); |
| 5606 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) |
| 5607 | flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start, |
| 5608 | gfn_end, flush, true); |
| 5609 | if (flush) |
| 5610 | kvm_flush_remote_tlbs_with_address(kvm, gfn_start, |
| 5611 | gfn_end); |
| 5612 | |
| 5613 | read_unlock(&kvm->mmu_lock); |
| 5614 | } |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5615 | } |
| 5616 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5617 | static bool slot_rmap_write_protect(struct kvm *kvm, |
Sean Christopherson | 0a234f5 | 2021-02-12 16:50:05 -0800 | [diff] [blame] | 5618 | struct kvm_rmap_head *rmap_head, |
| 5619 | struct kvm_memory_slot *slot) |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5620 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5621 | return __rmap_write_protect(kvm, rmap_head, false); |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5622 | } |
| 5623 | |
Dor Laor | e0fa826 | 2007-03-30 13:06:33 +0300 | [diff] [blame] | 5624 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 5625 | struct kvm_memory_slot *memslot, |
| 5626 | int start_level) |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 5627 | { |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 5628 | bool flush = false; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 5629 | |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 5630 | if (kvm_memslots_have_rmaps(kvm)) { |
| 5631 | write_lock(&kvm->mmu_lock); |
| 5632 | flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect, |
| 5633 | start_level, KVM_MAX_HUGEPAGE_LEVEL, |
| 5634 | false); |
| 5635 | write_unlock(&kvm->mmu_lock); |
| 5636 | } |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 5637 | |
Ben Gardon | 24ae4cf | 2021-04-01 16:37:34 -0700 | [diff] [blame] | 5638 | if (is_tdp_mmu_enabled(kvm)) { |
| 5639 | read_lock(&kvm->mmu_lock); |
| 5640 | flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level); |
| 5641 | read_unlock(&kvm->mmu_lock); |
| 5642 | } |
| 5643 | |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 5644 | /* |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5645 | * We can flush all the TLBs out of the mmu lock without TLB |
| 5646 | * corruption since we just change the spte from writable to |
Xiao Guangrong | e7d11c7 | 2013-05-31 08:36:27 +0800 | [diff] [blame] | 5647 | * readonly, so we only need to care about the case of changing a |
| 5648 | * spte from present to present (changing a spte from present |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5649 | * to nonpresent flushes all the TLBs immediately). In other |
| 5650 | * words, the only case we care about is mmu_spte_update(), which |
Sean Christopherson | 5fc3424 | 2021-02-25 12:47:43 -0800 | [diff] [blame] | 5651 | * checks Host-writable | MMU-writable instead of |
| 5652 | * PT_WRITABLE_MASK, meaning it no longer depends on |
| 5653 | * PT_WRITABLE_MASK. |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 5654 | */ |
| 5655 | if (flush) |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 5656 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 5657 | } |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5658 | |
| 5659 | static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, |
Sean Christopherson | 0a234f5 | 2021-02-12 16:50:05 -0800 | [diff] [blame] | 5660 | struct kvm_rmap_head *rmap_head, |
| 5661 | struct kvm_memory_slot *slot) |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5662 | { |
| 5663 | u64 *sptep; |
| 5664 | struct rmap_iterator iter; |
| 5665 | int need_tlb_flush = 0; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 5666 | kvm_pfn_t pfn; |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5667 | struct kvm_mmu_page *sp; |
| 5668 | |
| 5669 | restart: |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5670 | for_each_rmap_spte(rmap_head, &iter, sptep) { |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 5671 | sp = sptep_to_sp(sptep); |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5672 | pfn = spte_to_pfn(*sptep); |
| 5673 | |
| 5674 | /* |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5675 | * We cannot do huge page mapping for indirect shadow pages, |
| 5676 | * which are found on the last rmap (level = 1) when not using |
| 5677 | * tdp; such shadow pages are synced with the page table in |
| 5678 | * the guest, and the guest page table uses 4K page size |
| 5679 | * mappings if the indirect sp has level = 1. |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5680 | */ |
Sean Christopherson | a78986a | 2019-11-11 14:12:27 -0800 | [diff] [blame] | 5681 | if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && |
Sean Christopherson | 9eba50f | 2021-02-12 16:50:06 -0800 | [diff] [blame] | 5682 | sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn, |
| 5683 | pfn, PG_LEVEL_NUM)) { |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 5684 | pte_list_remove(rmap_head, sptep); |
Lan Tianyu | 40ef75a | 2018-12-06 21:21:08 +0800 | [diff] [blame] | 5685 | |
| 5686 | if (kvm_available_flush_tlb_with_range()) |
| 5687 | kvm_flush_remote_tlbs_with_address(kvm, sp->gfn, |
| 5688 | KVM_PAGES_PER_HPAGE(sp->role.level)); |
| 5689 | else |
| 5690 | need_tlb_flush = 1; |
| 5691 | |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 5692 | goto restart; |
| 5693 | } |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5694 | } |
| 5695 | |
| 5696 | return need_tlb_flush; |
| 5697 | } |
| 5698 | |
| 5699 | void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, |
Paolo Bonzini | f36f3f2 | 2015-05-18 13:20:23 +0200 | [diff] [blame] | 5700 | const struct kvm_memory_slot *memslot) |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5701 | { |
Paolo Bonzini | f36f3f2 | 2015-05-18 13:20:23 +0200 | [diff] [blame] | 5702 | /* FIXME: const-ify all uses of struct kvm_memory_slot. */ |
Sean Christopherson | 9eba50f | 2021-02-12 16:50:06 -0800 | [diff] [blame] | 5703 | struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot; |
Colin Ian King | 31c6565 | 2021-06-22 16:09:12 +0100 | [diff] [blame] | 5704 | bool flush = false; |
Sean Christopherson | 9eba50f | 2021-02-12 16:50:06 -0800 | [diff] [blame] | 5705 | |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 5706 | if (kvm_memslots_have_rmaps(kvm)) { |
| 5707 | write_lock(&kvm->mmu_lock); |
| 5708 | flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true); |
| 5709 | if (flush) |
| 5710 | kvm_arch_flush_remote_tlbs_memslot(kvm, slot); |
| 5711 | write_unlock(&kvm->mmu_lock); |
| 5712 | } |
Ben Gardon | 2db6f77 | 2021-04-01 16:37:33 -0700 | [diff] [blame] | 5713 | |
| 5714 | if (is_tdp_mmu_enabled(kvm)) { |
Ben Gardon | 2db6f77 | 2021-04-01 16:37:33 -0700 | [diff] [blame] | 5715 | read_lock(&kvm->mmu_lock); |
| 5716 | flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush); |
| 5717 | if (flush) |
| 5718 | kvm_arch_flush_remote_tlbs_memslot(kvm, slot); |
| 5719 | read_unlock(&kvm->mmu_lock); |
| 5720 | } |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5721 | } |
| 5722 | |
Sean Christopherson | b3594ff | 2020-02-18 13:07:34 -0800 | [diff] [blame] | 5723 | void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, |
Paolo Bonzini | 6c9dd6d | 2021-04-02 17:53:09 +0200 | [diff] [blame] | 5724 | const struct kvm_memory_slot *memslot) |
Sean Christopherson | b3594ff | 2020-02-18 13:07:34 -0800 | [diff] [blame] | 5725 | { |
| 5726 | /* |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 5727 | * All current use cases for flushing the TLBs for a specific memslot |
Sean Christopherson | 302695a | 2021-03-25 19:19:41 -0700 | [diff] [blame] | 5728 | * are related to dirty logging, and many do the TLB flush out of mmu_lock. |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 5729 | * The interaction between the various operations on the memslot must be |
| 5730 | * serialized by slots_lock to ensure the TLB flush from one operation |
| 5731 | * is observed by any other operation on the same memslot. |
Sean Christopherson | b3594ff | 2020-02-18 13:07:34 -0800 | [diff] [blame] | 5732 | */ |
| 5733 | lockdep_assert_held(&kvm->slots_lock); |
Sean Christopherson | cec3764 | 2020-02-18 13:07:35 -0800 | [diff] [blame] | 5734 | kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, |
| 5735 | memslot->npages); |
Sean Christopherson | b3594ff | 2020-02-18 13:07:34 -0800 | [diff] [blame] | 5736 | } |
| 5737 | |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5738 | void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, |
| 5739 | struct kvm_memory_slot *memslot) |
| 5740 | { |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 5741 | bool flush = false; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5742 | |
Ben Gardon | e220971 | 2021-05-18 10:34:13 -0700 | [diff] [blame] | 5743 | if (kvm_memslots_have_rmaps(kvm)) { |
| 5744 | write_lock(&kvm->mmu_lock); |
| 5745 | flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, |
| 5746 | false); |
| 5747 | write_unlock(&kvm->mmu_lock); |
| 5748 | } |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5749 | |
Ben Gardon | 24ae4cf | 2021-04-01 16:37:34 -0700 | [diff] [blame] | 5750 | if (is_tdp_mmu_enabled(kvm)) { |
| 5751 | read_lock(&kvm->mmu_lock); |
| 5752 | flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); |
| 5753 | read_unlock(&kvm->mmu_lock); |
| 5754 | } |
| 5755 | |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5756 | /* |
| 5757 | * It's also safe to flush TLBs outside of mmu_lock here because this
| 5758 | * function is currently only used for dirty logging, in which case
| 5759 | * flushing the TLB outside of mmu_lock still guarantees that no dirty
| 5760 | * pages will be lost in dirty_bitmap.
| 5761 | */ |
| 5762 | if (flush) |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 5763 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5764 | } |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5765 | |
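/*
 * Zap every shadow page in the VM, including TDP MMU pages.  Runs with
 * mmu_lock held for write and restarts the walk after each zap or after
 * yielding via cond_resched_rwlock_write().
 */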
Sean Christopherson | 92f58b5 | 2019-09-12 19:46:04 -0700 | [diff] [blame] | 5766 | void kvm_mmu_zap_all(struct kvm *kvm) |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 5767 | { |
| 5768 | struct kvm_mmu_page *sp, *node; |
Sean Christopherson | 7390de1 | 2019-02-05 13:01:31 -0800 | [diff] [blame] | 5769 | LIST_HEAD(invalid_list); |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 5770 | int ign; |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5771 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5772 | write_lock(&kvm->mmu_lock); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5773 | restart: |
Sean Christopherson | 8a674ad | 2019-02-05 13:01:32 -0800 | [diff] [blame] | 5774 | list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { |
Sean Christopherson | f95eec9 | 2020-06-23 12:35:39 -0700 | [diff] [blame] | 5775 | if (WARN_ON(sp->role.invalid)) |
Sean Christopherson | 8a674ad | 2019-02-05 13:01:32 -0800 | [diff] [blame] | 5776 | continue; |
Sean Christopherson | 92f58b5 | 2019-09-12 19:46:04 -0700 | [diff] [blame] | 5777 | if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5778 | goto restart; |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5779 | if (cond_resched_rwlock_write(&kvm->mmu_lock)) |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5780 | goto restart; |
| 5781 | } |
| 5782 | |
Sean Christopherson | 4771450 | 2019-02-05 13:01:23 -0800 | [diff] [blame] | 5783 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Ben Gardon | faaf05b0 | 2020-10-14 11:26:47 -0700 | [diff] [blame] | 5784 | |
Paolo Bonzini | 897218f | 2021-02-06 09:53:33 -0500 | [diff] [blame] | 5785 | if (is_tdp_mmu_enabled(kvm)) |
Ben Gardon | faaf05b0 | 2020-10-14 11:26:47 -0700 | [diff] [blame] | 5786 | kvm_tdp_mmu_zap_all(kvm); |
| 5787 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5788 | write_unlock(&kvm->mmu_lock); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5789 | } |
| 5790 | |
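/*
 * Called when the memslot generation is bumped.  MMIO SPTEs embed the
 * generation, so stale entries simply miss the fast MMIO path and are
 * recreated lazily; only a wrap of the MMIO generation (checked below)
 * requires zapping all shadow pages.
 */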
Sean Christopherson | 1524825 | 2019-02-05 12:54:17 -0800 | [diff] [blame] | 5791 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 5792 | { |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 5793 | WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 5794 | |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 5795 | gen &= MMIO_SPTE_GEN_MASK; |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 5796 | |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 5797 | /* |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 5798 | * Generation numbers are incremented in multiples of the number of |
| 5799 | * address spaces in order to provide unique generations across all |
| 5800 | * address spaces. Strip what is effectively the address space |
| 5801 | * modifier prior to checking for a wrap of the MMIO generation so |
| 5802 | * that a wrap in any address space is detected. |
| 5803 | */ |
| 5804 | gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1); |
| 5805 | |
| 5806 | /* |
| 5807 | * The very rare case: if the MMIO generation number has wrapped, |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 5808 | * zap all shadow pages. |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 5809 | */ |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 5810 | if (unlikely(gen == 0)) { |
Bandan Das | ae0f549 | 2016-11-15 01:36:18 -0500 | [diff] [blame] | 5811 | kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n"); |
Sean Christopherson | 92f58b5 | 2019-09-12 19:46:04 -0700 | [diff] [blame] | 5812 | kvm_mmu_zap_all_fast(kvm); |
Takuya Yoshikawa | 7a2e8aa | 2013-06-21 01:34:31 +0900 | [diff] [blame] | 5813 | } |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 5814 | } |
| 5815 | |
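/*
 * Shrinker callback: reclaim MMU pages from at most one VM per invocation.
 * If the selected VM still has zapped-but-unfreed obsolete pages, only those
 * are committed; otherwise its oldest shadow pages are zapped.  The VM is
 * then rotated to the tail of vm_list.
 */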
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 5816 | static unsigned long |
| 5817 | mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5818 | { |
| 5819 | struct kvm *kvm; |
Ying Han | 1495f23 | 2011-05-24 17:12:27 -0700 | [diff] [blame] | 5820 | int nr_to_scan = sc->nr_to_scan; |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 5821 | unsigned long freed = 0; |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5822 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5823 | mutex_lock(&kvm_lock); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5824 | |
| 5825 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Jan Kiszka | 3d56cbd | 2011-12-02 18:35:24 +0100 | [diff] [blame] | 5826 | int idx; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 5827 | LIST_HEAD(invalid_list); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5828 | |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 5829 | /* |
Takuya Yoshikawa | 35f2d16 | 2012-08-20 18:35:39 +0900 | [diff] [blame] | 5830 | * Never scan more than sc->nr_to_scan VM instances. |
| 5831 | * In practice this condition is never hit, since we do not try
| 5832 | * to shrink more than one VM and it is very unlikely to see
| 5833 | * !n_used_mmu_pages that many times.
| 5834 | */ |
| 5835 | if (!nr_to_scan--) |
| 5836 | break; |
| 5837 | /* |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 5838 | * n_used_mmu_pages is accessed without holding kvm->mmu_lock |
| 5839 | * here.  We may skip a VM instance erroneously, but we do not
| 5840 | * want to shrink a VM that has only just started to populate its MMU
| 5841 | * anyway. |
| 5842 | */ |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5843 | if (!kvm->arch.n_used_mmu_pages && |
| 5844 | !kvm_has_zapped_obsolete_pages(kvm)) |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 5845 | continue; |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 5846 | |
Marcelo Tosatti | f656ce0 | 2009-12-23 14:35:25 -0200 | [diff] [blame] | 5847 | idx = srcu_read_lock(&kvm->srcu); |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5848 | write_lock(&kvm->mmu_lock); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5849 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5850 | if (kvm_has_zapped_obsolete_pages(kvm)) { |
| 5851 | kvm_mmu_commit_zap_page(kvm, |
| 5852 | &kvm->arch.zapped_obsolete_pages); |
| 5853 | goto unlock; |
| 5854 | } |
| 5855 | |
Sean Christopherson | ebdb292 | 2020-06-23 12:35:41 -0700 | [diff] [blame] | 5856 | freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan); |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 5857 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5858 | unlock: |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 5859 | write_unlock(&kvm->mmu_lock); |
Marcelo Tosatti | f656ce0 | 2009-12-23 14:35:25 -0200 | [diff] [blame] | 5860 | srcu_read_unlock(&kvm->srcu, idx); |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 5861 | |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 5862 | /* |
| 5863 | * unfair on small ones |
| 5864 | * per-vm shrinkers cry out |
| 5865 | * sadness comes quickly |
| 5866 | */ |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 5867 | list_move_tail(&kvm->vm_list, &vm_list); |
| 5868 | break; |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5869 | } |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5870 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 5871 | mutex_unlock(&kvm_lock); |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 5872 | return freed; |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 5873 | } |
| 5874 | |
| 5875 | static unsigned long |
| 5876 | mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) |
| 5877 | { |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 5878 | return percpu_counter_read_positive(&kvm_total_used_mmu_pages); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5879 | } |
| 5880 | |
| 5881 | static struct shrinker mmu_shrinker = { |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 5882 | .count_objects = mmu_shrink_count, |
| 5883 | .scan_objects = mmu_shrink_scan, |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5884 | .seeks = DEFAULT_SEEKS * 10, |
| 5885 | }; |
| 5886 | |
Ingo Molnar | 2ddfd20 | 2008-05-22 10:37:48 +0200 | [diff] [blame] | 5887 | static void mmu_destroy_caches(void) |
Dor Laor | e0fa826 | 2007-03-30 13:06:33 +0300 | [diff] [blame] | 5888 | { |
Tim Hansen | c1bd743 | 2017-10-07 23:15:23 -0400 | [diff] [blame] | 5889 | kmem_cache_destroy(pte_list_desc_cache); |
| 5890 | kmem_cache_destroy(mmu_page_header_cache); |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 5891 | } |
| 5892 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 5893 | static bool get_nx_auto_mode(void) |
| 5894 | { |
| 5895 | /* Return true when the CPU has the bug and mitigations are ON */
| 5896 | return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off(); |
| 5897 | } |
| 5898 | |
| 5899 | static void __set_nx_huge_pages(bool val) |
| 5900 | { |
| 5901 | nx_huge_pages = itlb_multihit_kvm_mitigation = val; |
| 5902 | } |
| 5903 | |
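/*
 * Handler for the nx_huge_pages module parameter.  Accepts "off", "force",
 * "auto", or a plain boolean.  When the effective value changes, all shadow
 * pages in every VM are zapped (under slots_lock) and each NX recovery
 * thread is woken so the new policy takes effect immediately.
 */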
| 5904 | static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) |
| 5905 | { |
| 5906 | bool old_val = nx_huge_pages; |
| 5907 | bool new_val; |
| 5908 | |
| 5909 | /* In "auto" mode, deploy the workaround only if the CPU has the bug. */
| 5910 | if (sysfs_streq(val, "off")) |
| 5911 | new_val = 0; |
| 5912 | else if (sysfs_streq(val, "force")) |
| 5913 | new_val = 1; |
| 5914 | else if (sysfs_streq(val, "auto")) |
| 5915 | new_val = get_nx_auto_mode(); |
| 5916 | else if (strtobool(val, &new_val) < 0) |
| 5917 | return -EINVAL; |
| 5918 | |
| 5919 | __set_nx_huge_pages(new_val); |
| 5920 | |
| 5921 | if (new_val != old_val) { |
| 5922 | struct kvm *kvm; |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 5923 | |
| 5924 | mutex_lock(&kvm_lock); |
| 5925 | |
| 5926 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Sean Christopherson | ed69a6c | 2019-11-13 11:30:32 -0800 | [diff] [blame] | 5927 | mutex_lock(&kvm->slots_lock); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 5928 | kvm_mmu_zap_all_fast(kvm); |
Sean Christopherson | ed69a6c | 2019-11-13 11:30:32 -0800 | [diff] [blame] | 5929 | mutex_unlock(&kvm->slots_lock); |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 5930 | |
| 5931 | wake_up_process(kvm->arch.nx_lpage_recovery_thread); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 5932 | } |
| 5933 | mutex_unlock(&kvm_lock); |
| 5934 | } |
| 5935 | |
| 5936 | return 0; |
| 5937 | } |
| 5938 | |
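/*
 * One-time module initialization: resolve the "auto" nx_huge_pages setting,
 * create the pte_list_desc and mmu_page_header caches, initialize the used
 * pages counter, and register the MMU shrinker.  Any caches that were
 * created are destroyed on failure.
 */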
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 5939 | int kvm_mmu_module_init(void) |
| 5940 | { |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 5941 | int ret = -ENOMEM; |
| 5942 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 5943 | if (nx_huge_pages == -1) |
| 5944 | __set_nx_huge_pages(get_nx_auto_mode()); |
| 5945 | |
Vitaly Kuznetsov | 36d9594d | 2018-10-08 21:28:10 +0200 | [diff] [blame] | 5946 | /* |
| 5947 | * MMU roles use union aliasing, which is, generally speaking,
| 5948 | * undefined behavior.  However, we supposedly know how compilers behave
| 5949 | * and the current status quo is unlikely to change.  The guards below
| 5950 | * are supposed to let us know if that assumption becomes false.
| 5951 | */ |
| 5952 | BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32)); |
| 5953 | BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32)); |
| 5954 | BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64)); |
| 5955 | |
Junaid Shahid | 28a1f3a | 2018-08-14 10:15:34 -0700 | [diff] [blame] | 5956 | kvm_mmu_reset_all_pte_masks(); |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 5957 | |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 5958 | pte_list_desc_cache = kmem_cache_create("pte_list_desc", |
| 5959 | sizeof(struct pte_list_desc), |
Shakeel Butt | 46bea48 | 2017-10-05 18:07:24 -0700 | [diff] [blame] | 5960 | 0, SLAB_ACCOUNT, NULL); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 5961 | if (!pte_list_desc_cache) |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 5962 | goto out; |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 5963 | |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 5964 | mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", |
| 5965 | sizeof(struct kvm_mmu_page), |
Shakeel Butt | 46bea48 | 2017-10-05 18:07:24 -0700 | [diff] [blame] | 5966 | 0, SLAB_ACCOUNT, NULL); |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 5967 | if (!mmu_page_header_cache) |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 5968 | goto out; |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 5969 | |
Tejun Heo | 908c7f1 | 2014-09-08 09:51:29 +0900 | [diff] [blame] | 5970 | if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 5971 | goto out; |
Wei Yongjun | 45bf21a | 2010-08-23 16:13:15 +0800 | [diff] [blame] | 5972 | |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 5973 | ret = register_shrinker(&mmu_shrinker); |
| 5974 | if (ret) |
| 5975 | goto out; |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5976 | |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 5977 | return 0; |
| 5978 | |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 5979 | out: |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5980 | mmu_destroy_caches(); |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 5981 | return ret; |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 5982 | } |
| 5983 | |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 5984 | /* |
Peng Hao | 39337ad | 2018-10-04 11:45:00 -0400 | [diff] [blame] | 5985 | * Calculate mmu pages needed for kvm. |
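 *
 * The budget is KVM_PERMILLE_MMU_PAGES per mille of the total number of
 * guest pages across all address spaces, with a floor of
 * KVM_MIN_ALLOC_MMU_PAGES.  For example, assuming the default
 * KVM_PERMILLE_MMU_PAGES of 20, a guest backed by 1,048,576 pages (4GiB in
 * 4KiB pages) gets a budget of 1048576 * 20 / 1000 = 20971 MMU pages.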
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 5986 | */ |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 5987 | unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 5988 | { |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 5989 | unsigned long nr_mmu_pages; |
| 5990 | unsigned long nr_pages = 0; |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 5991 | struct kvm_memslots *slots; |
Xiao Guangrong | be6ba0f | 2011-11-24 17:39:18 +0800 | [diff] [blame] | 5992 | struct kvm_memory_slot *memslot; |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5993 | int i; |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 5994 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5995 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
| 5996 | slots = __kvm_memslots(kvm, i); |
Lai Jiangshan | 90d83dc | 2010-04-19 17:41:23 +0800 | [diff] [blame] | 5997 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5998 | kvm_for_each_memslot(memslot, slots) |
| 5999 | nr_pages += memslot->npages; |
| 6000 | } |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6001 | |
| 6002 | nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 6003 | nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6004 | |
| 6005 | return nr_mmu_pages; |
| 6006 | } |
| 6007 | |
Xiao Guangrong | c42fffe | 2010-09-27 18:07:07 +0800 | [diff] [blame] | 6008 | void kvm_mmu_destroy(struct kvm_vcpu *vcpu) |
| 6009 | { |
Paolo Bonzini | 95f93af | 2013-10-02 16:56:12 +0200 | [diff] [blame] | 6010 | kvm_mmu_unload(vcpu); |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 6011 | free_mmu_pages(&vcpu->arch.root_mmu); |
| 6012 | free_mmu_pages(&vcpu->arch.guest_mmu); |
Xiao Guangrong | c42fffe | 2010-09-27 18:07:07 +0800 | [diff] [blame] | 6013 | mmu_free_memory_caches(vcpu); |
Xiao Guangrong | b034cf0 | 2010-12-23 16:08:35 +0800 | [diff] [blame] | 6014 | } |
| 6015 | |
Xiao Guangrong | b034cf0 | 2010-12-23 16:08:35 +0800 | [diff] [blame] | 6016 | void kvm_mmu_module_exit(void) |
| 6017 | { |
| 6018 | mmu_destroy_caches(); |
| 6019 | percpu_counter_destroy(&kvm_total_used_mmu_pages); |
| 6020 | unregister_shrinker(&mmu_shrinker); |
Xiao Guangrong | c42fffe | 2010-09-27 18:07:07 +0800 | [diff] [blame] | 6021 | mmu_audit_disable(); |
| 6022 | } |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6023 | |
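/*
 * Handler for the nx_huge_pages_recovery_ratio module parameter.  If the
 * ratio goes from zero to nonzero while the NX huge page mitigation is
 * enabled, wake each VM's recovery thread so it starts reclaiming instead
 * of continuing to sleep indefinitely.
 */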
| 6024 | static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp) |
| 6025 | { |
| 6026 | unsigned int old_val; |
| 6027 | int err; |
| 6028 | |
| 6029 | old_val = nx_huge_pages_recovery_ratio; |
| 6030 | err = param_set_uint(val, kp); |
| 6031 | if (err) |
| 6032 | return err; |
| 6033 | |
| 6034 | if (READ_ONCE(nx_huge_pages) && |
| 6035 | !old_val && nx_huge_pages_recovery_ratio) { |
| 6036 | struct kvm *kvm; |
| 6037 | |
| 6038 | mutex_lock(&kvm_lock); |
| 6039 | |
| 6040 | list_for_each_entry(kvm, &vm_list, vm_list) |
| 6041 | wake_up_process(kvm->arch.nx_lpage_recovery_thread); |
| 6042 | |
| 6043 | mutex_unlock(&kvm_lock); |
| 6044 | } |
| 6045 | |
| 6046 | return err; |
| 6047 | } |
| 6048 | |
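/*
 * Reclaim shadow pages that exist only because NX huge pages are disallowed,
 * zapping up to 1/nx_huge_pages_recovery_ratio of them per invocation so
 * that the corresponding huge mappings can be reinstated on a later fault.
 */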
| 6049 | static void kvm_recover_nx_lpages(struct kvm *kvm) |
| 6050 | { |
Sean Christopherson | ade74e1 | 2021-06-15 09:29:05 -0700 | [diff] [blame] | 6051 | unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits; |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6052 | int rcu_idx; |
| 6053 | struct kvm_mmu_page *sp; |
| 6054 | unsigned int ratio; |
| 6055 | LIST_HEAD(invalid_list); |
Sean Christopherson | 048f498 | 2021-03-25 13:01:18 -0700 | [diff] [blame] | 6056 | bool flush = false; |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6057 | ulong to_zap; |
| 6058 | |
| 6059 | rcu_idx = srcu_read_lock(&kvm->srcu); |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 6060 | write_lock(&kvm->mmu_lock); |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6061 | |
| 6062 | ratio = READ_ONCE(nx_huge_pages_recovery_ratio); |
Sean Christopherson | ade74e1 | 2021-06-15 09:29:05 -0700 | [diff] [blame] | 6063 | to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0; |
Sean Christopherson | 7d919c7 | 2020-09-23 11:37:29 -0700 | [diff] [blame] | 6064 | for ( ; to_zap; --to_zap) { |
| 6065 | if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) |
| 6066 | break; |
| 6067 | |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6068 | /* |
| 6069 | * We use a separate list instead of just using active_mmu_pages |
| 6070 | * because the number of lpage_disallowed pages is expected to |
| 6071 | * be relatively small compared to the total. |
| 6072 | */ |
| 6073 | sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages, |
| 6074 | struct kvm_mmu_page, |
| 6075 | lpage_disallowed_link); |
| 6076 | WARN_ON_ONCE(!sp->lpage_disallowed); |
Paolo Bonzini | 897218f | 2021-02-06 09:53:33 -0500 | [diff] [blame] | 6077 | if (is_tdp_mmu_page(sp)) { |
Paolo Bonzini | 315f02c | 2021-04-06 11:08:51 -0400 | [diff] [blame] | 6078 | flush |= kvm_tdp_mmu_zap_sp(kvm, sp); |
Ben Gardon | 8d1a182 | 2021-02-02 10:57:15 -0800 | [diff] [blame] | 6079 | } else { |
Ben Gardon | 29cf0f5 | 2020-10-14 11:27:00 -0700 | [diff] [blame] | 6080 | kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); |
| 6081 | WARN_ON_ONCE(sp->lpage_disallowed); |
| 6082 | } |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6083 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 6084 | if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { |
Sean Christopherson | 048f498 | 2021-03-25 13:01:18 -0700 | [diff] [blame] | 6085 | kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 6086 | cond_resched_rwlock_write(&kvm->mmu_lock); |
Sean Christopherson | 048f498 | 2021-03-25 13:01:18 -0700 | [diff] [blame] | 6087 | flush = false; |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6088 | } |
| 6089 | } |
Sean Christopherson | 048f498 | 2021-03-25 13:01:18 -0700 | [diff] [blame] | 6090 | kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6091 | |
Ben Gardon | 531810c | 2021-02-02 10:57:24 -0800 | [diff] [blame] | 6092 | write_unlock(&kvm->mmu_lock); |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6093 | srcu_read_unlock(&kvm->srcu, rcu_idx); |
| 6094 | } |
| 6095 | |
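/*
 * The recovery worker runs once every 60 seconds while nx_huge_pages and a
 * nonzero recovery ratio are both set; otherwise it sleeps until woken by a
 * parameter change (MAX_SCHEDULE_TIMEOUT).
 */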
| 6096 | static long get_nx_lpage_recovery_timeout(u64 start_time) |
| 6097 | { |
| 6098 | return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio) |
| 6099 | ? start_time + 60 * HZ - get_jiffies_64() |
| 6100 | : MAX_SCHEDULE_TIMEOUT; |
| 6101 | } |
| 6102 | |
| 6103 | static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data) |
| 6104 | { |
| 6105 | u64 start_time; |
| 6106 | long remaining_time; |
| 6107 | |
| 6108 | while (true) { |
| 6109 | start_time = get_jiffies_64(); |
| 6110 | remaining_time = get_nx_lpage_recovery_timeout(start_time); |
| 6111 | |
| 6112 | set_current_state(TASK_INTERRUPTIBLE); |
| 6113 | while (!kthread_should_stop() && remaining_time > 0) { |
| 6114 | schedule_timeout(remaining_time); |
| 6115 | remaining_time = get_nx_lpage_recovery_timeout(start_time); |
| 6116 | set_current_state(TASK_INTERRUPTIBLE); |
| 6117 | } |
| 6118 | |
| 6119 | set_current_state(TASK_RUNNING); |
| 6120 | |
| 6121 | if (kthread_should_stop()) |
| 6122 | return 0; |
| 6123 | |
| 6124 | kvm_recover_nx_lpages(kvm); |
| 6125 | } |
| 6126 | } |
| 6127 | |
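/*
 * Create the per-VM "kvm-nx-lpage-recovery" worker once the VM is otherwise
 * fully initialized; the thread is unparked only if creation succeeded.
 */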
| 6128 | int kvm_mmu_post_init_vm(struct kvm *kvm) |
| 6129 | { |
| 6130 | int err; |
| 6131 | |
| 6132 | err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0, |
| 6133 | "kvm-nx-lpage-recovery", |
| 6134 | &kvm->arch.nx_lpage_recovery_thread); |
| 6135 | if (!err) |
| 6136 | kthread_unpark(kvm->arch.nx_lpage_recovery_thread); |
| 6137 | |
| 6138 | return err; |
| 6139 | } |
| 6140 | |
| 6141 | void kvm_mmu_pre_destroy_vm(struct kvm *kvm) |
| 6142 | { |
| 6143 | if (kvm->arch.nx_lpage_recovery_thread) |
| 6144 | kthread_stop(kvm->arch.nx_lpage_recovery_thread); |
| 6145 | } |