// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#include "irq.h"
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
#include <linux/kthread.h>

#include <asm/page.h>
#include <asm/memtype.h>
#include <asm/cmpxchg.h>
#include <asm/e820/api.h>
#include <asm/io.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
#include "trace.h"

extern bool itlb_multihit_kvm_mitigation;

static int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);

static struct kernel_param_ops nx_huge_pages_ops = {
	.set = set_nx_huge_pages,
	.get = param_get_bool,
};

static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
	.set = set_nx_huge_pages_recovery_ratio,
	.get = param_get_uint,
};

module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
		&nx_huge_pages_recovery_ratio, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");

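/*
 * Both knobs above are exposed as ordinary module parameters of kvm.ko,
 * e.g. "kvm.nx_huge_pages=off" on the kernel command line or
 * /sys/module/kvm/parameters/nx_huge_pages at run time; the custom set
 * callbacks allow runtime changes to take effect on already-running VMs
 * rather than only on newly created ones.
 */
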
/*
 * When set to true, this variable enables Two-Dimensional Paging (TDP),
 * where the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports TDP, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

static int max_page_level __read_mostly;

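/*
 * Note: tdp_enabled and max_page_level are configured by the vendor module
 * (VMX or SVM) at load time, based on whether EPT/NPT is available and on
 * the largest page size the TDP hardware can map.
 */
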
enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
};

#undef MMU_DEBUG

#ifdef MMU_DEBUG
static bool dbg = 0;
module_param(dbg, bool, 0644);

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

#define PTE_PREFETCH_NUM		8

#define PT_FIRST_AVAIL_BITS_SHIFT 10
#define PT64_SECOND_AVAIL_BITS_SHIFT 54

/*
 * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
 * Access Tracking SPTEs.
 */
#define SPTE_SPECIAL_MASK (3ULL << 52)
#define SPTE_AD_ENABLED_MASK (0ULL << 52)
#define SPTE_AD_DISABLED_MASK (1ULL << 52)
#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
#define SPTE_MMIO_MASK (3ULL << 52)

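/*
 * Example decoding of bits 53:52 in a shadow-present SPTE: 00b means the
 * hardware A/D bits are in use, 01b means A/D bits are disabled (accessed
 * state is then tracked via access-tracking SPTEs), 10b means A/D bits are
 * in use but dirty logging must rely on write protection (see
 * kvm_vcpu_ad_need_write_protect() below), and 11b, together with the rest
 * of shadow_mmio_value, marks an MMIO SPTE.
 */
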
#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


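/*
 * Worked example of the index arithmetic above: with 4KiB pages
 * (PAGE_SHIFT == 12), PT64_LEVEL_SHIFT(1..4) is 12, 21, 30 and 39, so
 * PT64_INDEX(addr, 2) extracts bits 29:21 of the address -- the 9-bit
 * index into a 64-bit page directory.  The 32-bit non-PAE variants use
 * 10-bit indexes instead.
 */
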
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
#else
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/* The mask for the R/X bits in EPT PTEs */
#define PT64_EPT_READABLE_MASK			0x1ull
#define PT64_EPT_EXECUTABLE_MASK		0x4ull

#include <trace/events/kvm.h>

#define SPTE_HOST_WRITEABLE	(1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SPTE_MMU_WRITEABLE	(1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3

/*
 * Return values of handle_mmio_page_fault and mmu.page_fault:
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 *
 * For handle_mmio_page_fault only:
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE = 1,
	RET_PF_INVALID = 2,
};

struct pte_list_desc {
	u64 *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	u64 *sptep;
	int level;
	unsigned index;
};

#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
					 (_root), (_addr));                \
	     shadow_walk_okay(&(_walker));                                 \
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
	     shadow_walk_okay(&(_walker));                      \
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)     \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);                \
	     shadow_walk_okay(&(_walker)) &&                            \
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });  \
	     __shadow_walk_next(&(_walker), spte))

static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_mask;
static u64 __read_mostly shadow_mmio_value;
static u64 __read_mostly shadow_mmio_access_mask;
static u64 __read_mostly shadow_present_mask;
static u64 __read_mostly shadow_me_mask;

/*
 * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK;
 * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
 * pages.
 */
static u64 __read_mostly shadow_acc_track_mask;

/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes. We do not save the W bit as the
 * PTEs being access tracked also need to be dirty tracked, so the W bit will be
 * restored only when a write is attempted to the page.
 */
static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
						    PT64_EPT_EXECUTABLE_MASK;
static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;

/*
 * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
 * to guard against L1TF attacks.
 */
static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

/*
 * The number of high-order 1 bits to use in the mask above.
 */
static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;

/*
 * In some cases, we need to preserve the GFN of a non-present or reserved
 * SPTE when we usurp the upper five bits of the physical address space to
 * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
 * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
 * left into the reserved bits, i.e. the GFN in the SPTE will be split into
 * high and low parts.  This mask covers the lower bits of the GFN.
 */
static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

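/*
 * Worked example (assuming a CPU whose L1D uses 44 physical address bits):
 * kvm_mmu_reset_all_pte_masks() below sets shadow_nonpresent_or_rsvd_mask to
 * bits 43:39 and shadow_nonpresent_or_rsvd_lower_gfn_mask to bits 38:12.  An
 * MMIO SPTE then keeps GPA bits 38:12 in place, relocates GPA bits 43:39 up
 * to bits 48:44, and forces bits 43:39 to 1 so that the address seen by a
 * speculative L1TF load through the not-present SPTE should not overlap
 * cacheable system RAM.
 */
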
/*
 * The number of non-reserved physical address bits irrespective of features
 * that repurpose legal bits, e.g. MKTME.
 */
static u8 __read_mostly shadow_phys_bits;

static void mmu_spte_set(u64 *sptep, u64 spte);
static bool is_executable_pte(u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);

#define CREATE_TRACE_POINTS
#include "mmutrace.h"


static inline bool kvm_available_flush_tlb_with_range(void)
{
	return kvm_x86_ops.tlb_remote_flush_with_range;
}

static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	int ret = -ENOTSUPP;

	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
		ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);

	if (ret)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
		u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range;

	range.start_gfn = start_gfn;
	range.pages = pages;

	kvm_flush_remote_tlbs_with_range(kvm, &range);
}

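/*
 * The ranged flush above is an optimization: when the backend provides
 * tlb_remote_flush_with_range (e.g. the Hyper-V enlightened TLB flush),
 * only the given GFN range needs to be invalidated; otherwise, or if the
 * backend call fails, we fall back to flushing every vCPU's TLB.
 */
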
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	BUG_ON((mmio_mask & mmio_value) != mmio_value);
	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

static bool is_mmio_spte(u64 spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value;
}

static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
	return sp->role.ad_disabled;
}

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the log
	 * would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages.  This also bypasses
	 * PML, since writes now result in a vmexit.
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
}

static inline bool spte_ad_enabled(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
}

static inline bool spte_ad_need_write_protect(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
}

static bool is_nx_huge_page_enabled(void)
{
	return READ_ONCE(nx_huge_pages);
}

static inline u64 spte_shadow_accessed_mask(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}

static inline u64 spte_shadow_dirty_mask(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}

static inline bool is_access_track_spte(u64 spte)
{
	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}

/*
 * Due to limited space in PTEs, the MMIO generation is an 18 bit subset of
 * the memslots generation and is derived as follows:
 *
 * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
 * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62
 *
 * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
 * the MMIO generation number, as doing so would require stealing a bit from
 * the "real" generation number and thus effectively halve the maximum number
 * of MMIO generations that can be handled before encountering a wrap (which
 * requires a full MMU zap).  The flag is instead explicitly queried when
 * checking for MMIO spte cache hits.
 */
#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(17, 0)

#define MMIO_SPTE_GEN_LOW_START		3
#define MMIO_SPTE_GEN_LOW_END		11
#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
						    MMIO_SPTE_GEN_LOW_START)

#define MMIO_SPTE_GEN_HIGH_START	PT64_SECOND_AVAIL_BITS_SHIFT
#define MMIO_SPTE_GEN_HIGH_END		62
#define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
						    MMIO_SPTE_GEN_HIGH_START)

static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
	BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}

static u64 get_mmio_spte_generation(u64 spte)
{
	u64 gen;

	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START;
	return gen;
}

static u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{

	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 mask = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	access &= shadow_mmio_access_mask;
	mask |= shadow_mmio_value | access;
	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< shadow_nonpresent_or_rsvd_mask_len;

	return mask;
}

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
			   unsigned int access)
{
	u64 mask = make_mmio_spte(vcpu, gfn, access);
	unsigned int gen = get_mmio_spte_generation(mask);

	access = mask & ACC_ALL;

	trace_mark_mmio_spte(sptep, gfn, access, gen);
	mmu_spte_set(sptep, mask);
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
	       & shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
}

static unsigned get_mmio_spte_access(u64 spte)
{
	return spte & shadow_mmio_access_mask;
}

static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			  kvm_pfn_t pfn, unsigned int access)
{
	if (unlikely(is_noslot_pfn(pfn))) {
		mark_mmio_spte(vcpu, sptep, gfn, access);
		return true;
	}

	return false;
}

static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
	u64 kvm_gen, spte_gen, gen;

	gen = kvm_vcpu_memslots(vcpu)->generation;
	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return false;

	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
	spte_gen = get_mmio_spte_generation(spte);

	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
	return likely(kvm_gen == spte_gen);
}

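/*
 * Every MMIO SPTE carries the memslots generation it was created with.  If
 * the generation has since changed (e.g. a memslot was added or removed),
 * check_mmio_spte() fails, the caller returns RET_PF_INVALID, and the real
 * page fault path rebuilds the SPTE against the current memslot layout
 * instead of trusting the cached MMIO translation.
 */
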
/*
 * Sets the shadow PTE masks used by the MMU.
 *
 * Assumptions:
 *  - Setting either @accessed_mask or @dirty_mask requires setting both
 *  - At least one of @accessed_mask or @acc_track_mask must be set
 */
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask)
{
	BUG_ON(!dirty_mask != !accessed_mask);
	BUG_ON(!accessed_mask && !acc_track_mask);
	BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);

	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
	shadow_present_mask = p_mask;
	shadow_acc_track_mask = acc_track_mask;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
	 * in CPU detection code, but the processor treats those reduced bits as
	 * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at
	 * the physical address bits reported by CPUID.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
	 * custom CPUID.  Proceed with whatever the kernel found since these features
	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}

static void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;

	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
	shadow_dirty_mask = 0;
	shadow_nx_mask = 0;
	shadow_x_mask = 0;
	shadow_mmio_mask = 0;
	shadow_present_mask = 0;
	shadow_acc_track_mask = 0;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or fewer physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID. Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_cache_bits;
	if (boot_cpu_data.x86_cache_bits <
	    52 - shadow_nonpresent_or_rsvd_mask_len) {
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(boot_cpu_data.x86_cache_bits -
				  shadow_nonpresent_or_rsvd_mask_len,
				  boot_cpu_data.x86_cache_bits - 1);
		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
	} else
		WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
	return (pte != 0) && !is_mmio_spte(pte);
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
	if (is_large_pte(pte))
		return 1;
	return 0;
}

static bool is_executable_pte(u64 spte)
{
	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
}

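/*
 * Note that is_executable_pte() works for both PTE formats: with EPT,
 * shadow_x_mask is the EPT X bit and shadow_nx_mask is zero, so the check
 * requires X to be set; without EPT, shadow_x_mask is zero and
 * shadow_nx_mask is the NX bit, so the check requires NX to be clear.
 */
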
static kvm_pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

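/*
 * pse36_gfn_delta() handles the PSE-36 4MB-page format, in which a group of
 * low PDE bits (starting at PT32_DIR_PSE36_SHIFT) holds physical address
 * bits above bit 31.  Shifting them left by 32 - PT32_DIR_PSE36_SHIFT -
 * PAGE_SHIFT moves them to the matching position within the gfn, so callers
 * can simply add the returned delta to the gfn decoded from the low bits.
 */
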
#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}

static u64 __get_spte_lockless(u64 *sptep)
{
	return READ_ONCE(*sptep);
}
#else
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};

static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp = page_header(__pa(sptep));

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, we should store
	 * the high bits first and only then set the present bit, so the
	 * CPU cannot fetch this spte while we are still setting it.
	 */
	smp_wmb();

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * the present bit first so the vCPU cannot fetch stale high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);

	return orig.spte;
}

/*
 * The idea of using this lightweight way to read the spte on 32-bit KVM
 * comes from gup_get_pte (mm/gup.c).
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running outside of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present sptes),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp = page_header(__pa(sptep));
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
#endif

static bool spte_can_locklessly_be_made_writable(u64 spte)
{
	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
}

static bool spte_has_volatile_bits(u64 spte)
{
	if (!is_shadow_present_pte(spte))
		return false;

	/*
	 * Always update the spte atomically if it can be updated
	 * out of mmu-lock: this ensures the dirty bit is not lost and
	 * also gives us a stable is_writable_pte() so that a needed
	 * TLB flush is not missed.
	 */
	if (spte_can_locklessly_be_made_writable(spte) ||
	    is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if ((spte & shadow_accessed_mask) == 0 ||
		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
			return true;
	}

	return false;
}

static bool is_accessed_spte(u64 spte)
{
	u64 accessed_mask = spte_shadow_accessed_mask(spte);

	return accessed_mask ? spte & accessed_mask
			     : !is_access_track_spte(spte);
}

static bool is_dirty_spte(u64 spte)
{
	u64 dirty_mask = spte_shadow_dirty_mask(spte);

	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}

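/*
 * When the MMU runs without hardware A/D bits (spte_ad_enabled() == false),
 * the masks above are zero: accessed state is then inferred from whether the
 * spte has been turned into an access-tracking spte, and dirty state from
 * whether the spte is currently writable.
 */
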
/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
 */
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;

	WARN_ON(!is_shadow_present_pte(new_spte));

	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
		return old_spte;
	}

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, new_spte);
	else
		old_spte = __update_clear_spte_slow(sptep, new_spte);

	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

	return old_spte;
}

Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 866 | /* Rules for using mmu_spte_update: |
Andrea Gelmini | bb3541f | 2016-05-21 14:14:44 +0200 | [diff] [blame] | 867 | * Update the state bits, it means the mapped pfn is not changed. |
Xiao Guangrong | 6e7d035 | 2012-06-20 15:58:33 +0800 | [diff] [blame] | 868 | * |
| 869 | * Whenever we overwrite a writable spte with a read-only one we |
| 870 | * should flush remote TLBs. Otherwise rmap_write_protect |
| 871 | * will find a read-only spte, even though the writable spte |
| 872 | * might be cached on a CPU's TLB, the return value indicates this |
| 873 | * case. |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 874 | * |
| 875 | * Returns true if the TLB needs to be flushed |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 876 | */ |
Xiao Guangrong | 6e7d035 | 2012-06-20 15:58:33 +0800 | [diff] [blame] | 877 | static bool mmu_spte_update(u64 *sptep, u64 new_spte) |
Avi Kivity | b79b93f | 2010-06-06 15:46:44 +0300 | [diff] [blame] | 878 | { |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 879 | bool flush = false; |
Junaid Shahid | f39a058 | 2016-12-06 16:46:14 -0800 | [diff] [blame] | 880 | u64 old_spte = mmu_spte_update_no_track(sptep, new_spte); |
Avi Kivity | b79b93f | 2010-06-06 15:46:44 +0300 | [diff] [blame] | 881 | |
Junaid Shahid | f39a058 | 2016-12-06 16:46:14 -0800 | [diff] [blame] | 882 | if (!is_shadow_present_pte(old_spte)) |
| 883 | return false; |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 884 | |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 885 | /* |
| 886 | * For the spte updated out of mmu-lock is safe, since |
Adam Buchbinder | 6a6256f | 2016-02-23 15:34:30 -0800 | [diff] [blame] | 887 | * we always atomically update it, see the comments in |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 888 | * spte_has_volatile_bits(). |
| 889 | */ |
Junaid Shahid | ea4114b | 2016-12-06 16:46:11 -0800 | [diff] [blame] | 890 | if (spte_can_locklessly_be_made_writable(old_spte) && |
Xiao Guangrong | 7f31c95 | 2014-04-17 17:06:15 +0800 | [diff] [blame] | 891 | !is_writable_pte(new_spte)) |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 892 | flush = true; |
Xiao Guangrong | 4132779 | 2010-08-02 16:15:08 +0800 | [diff] [blame] | 893 | |
Kai Huang | 7e71a59 | 2015-01-09 16:44:30 +0800 | [diff] [blame] | 894 | /* |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 895 | * Flush TLB when accessed/dirty states are changed in the page tables, |
Kai Huang | 7e71a59 | 2015-01-09 16:44:30 +0800 | [diff] [blame] | 896 | * to guarantee consistency between TLB and page tables. |
| 897 | */ |
Kai Huang | 7e71a59 | 2015-01-09 16:44:30 +0800 | [diff] [blame] | 898 | |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 899 | if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) { |
| 900 | flush = true; |
Xiao Guangrong | 4132779 | 2010-08-02 16:15:08 +0800 | [diff] [blame] | 901 | kvm_set_pfn_accessed(spte_to_pfn(old_spte)); |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 902 | } |
Xiao Guangrong | 6e7d035 | 2012-06-20 15:58:33 +0800 | [diff] [blame] | 903 | |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 904 | if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) { |
| 905 | flush = true; |
| 906 | kvm_set_pfn_dirty(spte_to_pfn(old_spte)); |
| 907 | } |
| 908 | |
| 909 | return flush; |
Avi Kivity | b79b93f | 2010-06-06 15:46:44 +0300 | [diff] [blame] | 910 | } |
| 911 | |
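/*
 * Illustrative sketch (editor's addition, not part of mmu.c): how a caller
 * is expected to consume mmu_spte_update()'s return value.  The helper name
 * is hypothetical; real callers typically accumulate the result into a
 * "flush" flag and flush once, as __rmap_write_protect() does further down.
 */
static void example_update_and_flush(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
	/* A true return means a stale, more-permissive TLB entry may exist. */
	if (mmu_spte_update(sptep, new_spte))
		kvm_flush_remote_tlbs(kvm);
}
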
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 912 | /* |
| 913 | * Rules for using mmu_spte_clear_track_bits: |
 | 914 |  * It changes the spte from present to non-present and tracks the |
 | 915 |  * state bits; it is used to clear a last-level sptep. |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 916 |  * Returns non-zero if the spte was previously present. |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 917 | */ |
| 918 | static int mmu_spte_clear_track_bits(u64 *sptep) |
| 919 | { |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 920 | kvm_pfn_t pfn; |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 921 | u64 old_spte = *sptep; |
| 922 | |
| 923 | if (!spte_has_volatile_bits(old_spte)) |
Xiao Guangrong | 603e065 | 2011-07-12 03:31:28 +0800 | [diff] [blame] | 924 | __update_clear_spte_fast(sptep, 0ull); |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 925 | else |
Xiao Guangrong | 603e065 | 2011-07-12 03:31:28 +0800 | [diff] [blame] | 926 | old_spte = __update_clear_spte_slow(sptep, 0ull); |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 927 | |
Takuya Yoshikawa | afd28fe | 2015-11-20 17:44:55 +0900 | [diff] [blame] | 928 | if (!is_shadow_present_pte(old_spte)) |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 929 | return 0; |
| 930 | |
| 931 | pfn = spte_to_pfn(old_spte); |
Xiao Guangrong | 86fde74 | 2012-07-17 21:52:52 +0800 | [diff] [blame] | 932 | |
| 933 | /* |
 | 934 |  * KVM does not hold a reference to the page used by the |
 | 935 |  * KVM MMU, so the page must be unmapped from the MMU |
 | 936 |  * before it can be reclaimed. |
| 937 | */ |
Ard Biesheuvel | bf4bea8 | 2014-11-10 08:33:56 +0000 | [diff] [blame] | 938 | WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn))); |
Xiao Guangrong | 86fde74 | 2012-07-17 21:52:52 +0800 | [diff] [blame] | 939 | |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 940 | if (is_accessed_spte(old_spte)) |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 941 | kvm_set_pfn_accessed(pfn); |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 942 | |
| 943 | if (is_dirty_spte(old_spte)) |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 944 | kvm_set_pfn_dirty(pfn); |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 945 | |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 946 | return 1; |
| 947 | } |
| 948 | |
| 949 | /* |
| 950 | * Rules for using mmu_spte_clear_no_track: |
 | 951 |  * Directly clear the spte without caring about its state bits; |
 | 952 |  * it is used when clearing an upper-level spte. |
| 953 | */ |
| 954 | static void mmu_spte_clear_no_track(u64 *sptep) |
| 955 | { |
Xiao Guangrong | 603e065 | 2011-07-12 03:31:28 +0800 | [diff] [blame] | 956 | __update_clear_spte_fast(sptep, 0ull); |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 957 | } |
| 958 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 959 | static u64 mmu_spte_get_lockless(u64 *sptep) |
| 960 | { |
| 961 | return __get_spte_lockless(sptep); |
| 962 | } |
| 963 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 964 | static u64 mark_spte_for_access_track(u64 spte) |
| 965 | { |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 966 | if (spte_ad_enabled(spte)) |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 967 | return spte & ~shadow_accessed_mask; |
| 968 | |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 969 | if (is_access_track_spte(spte)) |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 970 | return spte; |
| 971 | |
| 972 | /* |
Junaid Shahid | 20d6523 | 2016-12-21 20:29:31 -0800 | [diff] [blame] | 973 | * Making an Access Tracking PTE will result in removal of write access |
| 974 | * from the PTE. So, verify that we will be able to restore the write |
| 975 | * access in the fast page fault path later on. |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 976 | */ |
| 977 | WARN_ONCE((spte & PT_WRITABLE_MASK) && |
| 978 | !spte_can_locklessly_be_made_writable(spte), |
| 979 | "kvm: Writable SPTE is not locklessly dirty-trackable\n"); |
| 980 | |
| 981 | WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask << |
| 982 | shadow_acc_track_saved_bits_shift), |
| 983 | "kvm: Access Tracking saved bit locations are not zero\n"); |
| 984 | |
| 985 | spte |= (spte & shadow_acc_track_saved_bits_mask) << |
| 986 | shadow_acc_track_saved_bits_shift; |
| 987 | spte &= ~shadow_acc_track_mask; |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 988 | |
| 989 | return spte; |
| 990 | } |
| 991 | |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 992 | /* Restore an acc-track PTE back to a regular PTE */ |
| 993 | static u64 restore_acc_track_spte(u64 spte) |
| 994 | { |
| 995 | u64 new_spte = spte; |
| 996 | u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift) |
| 997 | & shadow_acc_track_saved_bits_mask; |
| 998 | |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 999 | WARN_ON_ONCE(spte_ad_enabled(spte)); |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 1000 | WARN_ON_ONCE(!is_access_track_spte(spte)); |
| 1001 | |
| 1002 | new_spte &= ~shadow_acc_track_mask; |
| 1003 | new_spte &= ~(shadow_acc_track_saved_bits_mask << |
| 1004 | shadow_acc_track_saved_bits_shift); |
| 1005 | new_spte |= saved_bits; |
| 1006 | |
| 1007 | return new_spte; |
| 1008 | } |
| 1009 | |
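/*
 * Illustrative sketch (editor's addition, not part of mmu.c): an
 * access-tracking SPTE stashes its permission (R/W/X) bits in the saved-bits
 * area and gets them back on re-access.  The helper name is hypothetical;
 * the real consumer of restore_acc_track_spte() is the fast page fault path.
 */
static u64 example_reaccess_spte(u64 spte)
{
	/* Only access-tracking SPTEs carry saved permission bits. */
	if (is_access_track_spte(spte))
		spte = restore_acc_track_spte(spte);

	return spte;
}
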
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1010 | /* Returns the Accessed status of the PTE and resets it at the same time. */ |
| 1011 | static bool mmu_spte_age(u64 *sptep) |
| 1012 | { |
| 1013 | u64 spte = mmu_spte_get_lockless(sptep); |
| 1014 | |
| 1015 | if (!is_accessed_spte(spte)) |
| 1016 | return false; |
| 1017 | |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1018 | if (spte_ad_enabled(spte)) { |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1019 | clear_bit((ffs(shadow_accessed_mask) - 1), |
| 1020 | (unsigned long *)sptep); |
| 1021 | } else { |
| 1022 | /* |
| 1023 | * Capture the dirty status of the page, so that it doesn't get |
| 1024 | * lost when the SPTE is marked for access tracking. |
| 1025 | */ |
| 1026 | if (is_writable_pte(spte)) |
| 1027 | kvm_set_pfn_dirty(spte_to_pfn(spte)); |
| 1028 | |
| 1029 | spte = mark_spte_for_access_track(spte); |
| 1030 | mmu_spte_update_no_track(sptep, spte); |
| 1031 | } |
| 1032 | |
| 1033 | return true; |
| 1034 | } |
| 1035 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 1036 | static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) |
| 1037 | { |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 1038 | /* |
 | 1039 |  * Prevent page table teardown by making anyone freeing page tables |
 | 1040 |  * wait for the kvm_flush_remote_tlbs() IPI to all active vcpus. |
| 1041 | */ |
| 1042 | local_irq_disable(); |
Lan Tianyu | 36ca7e0 | 2016-03-13 11:10:25 +0800 | [diff] [blame] | 1043 | |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 1044 | /* |
| 1045 | * Make sure a following spte read is not reordered ahead of the write |
| 1046 | * to vcpu->mode. |
| 1047 | */ |
Lan Tianyu | 36ca7e0 | 2016-03-13 11:10:25 +0800 | [diff] [blame] | 1048 | smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES); |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 1049 | } |
| 1050 | |
| 1051 | static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) |
| 1052 | { |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 1053 | /* |
| 1054 | * Make sure the write to vcpu->mode is not reordered in front of |
Tianyu Lan | 9a98458 | 2018-09-07 05:45:02 +0000 | [diff] [blame] | 1055 |  * reads of the sptes.  If it is, kvm_mmu_commit_zap_page() can see us |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 1056 | * OUTSIDE_GUEST_MODE and proceed to free the shadow page table. |
| 1057 | */ |
Lan Tianyu | 36ca7e0 | 2016-03-13 11:10:25 +0800 | [diff] [blame] | 1058 | smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 1059 | local_irq_enable(); |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 1060 | } |
| 1061 | |
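/*
 * Illustrative sketch (editor's addition, not part of mmu.c): lockless walks
 * must be bracketed by the begin/end helpers above so a concurrent zap waits
 * for the walker.  The function name is hypothetical; the real users are the
 * lockless page fault and spte prefetch paths.
 */
static u64 example_peek_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
	u64 spte;

	walk_shadow_page_lockless_begin(vcpu);
	/* The shadow page holding sptep cannot be freed inside this window. */
	spte = mmu_spte_get_lockless(sptep);
	walk_shadow_page_lockless_end(vcpu);

	return spte;
}
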
Avi Kivity | e2dec93 | 2007-01-05 16:36:54 -0800 | [diff] [blame] | 1062 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, |
Avi Kivity | 2e3e588 | 2007-09-10 11:28:17 +0300 | [diff] [blame] | 1063 | struct kmem_cache *base_cache, int min) |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1064 | { |
| 1065 | void *obj; |
| 1066 | |
| 1067 | if (cache->nobjs >= min) |
Avi Kivity | e2dec93 | 2007-01-05 16:36:54 -0800 | [diff] [blame] | 1068 | return 0; |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1069 | while (cache->nobjs < ARRAY_SIZE(cache->objects)) { |
Ben Gardon | 254272c | 2019-02-11 11:02:50 -0800 | [diff] [blame] | 1070 | obj = kmem_cache_zalloc(base_cache, GFP_KERNEL_ACCOUNT); |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1071 | if (!obj) |
Wei Yang | daefb79 | 2018-09-04 23:57:32 +0800 | [diff] [blame] | 1072 | return cache->nobjs >= min ? 0 : -ENOMEM; |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1073 | cache->objects[cache->nobjs++] = obj; |
| 1074 | } |
Avi Kivity | e2dec93 | 2007-01-05 16:36:54 -0800 | [diff] [blame] | 1075 | return 0; |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1076 | } |
| 1077 | |
Xiao Guangrong | f759e2b | 2011-09-22 16:53:17 +0800 | [diff] [blame] | 1078 | static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache) |
| 1079 | { |
| 1080 | return cache->nobjs; |
| 1081 | } |
| 1082 | |
Xiao Guangrong | e8ad9a7 | 2010-05-13 10:06:02 +0800 | [diff] [blame] | 1083 | static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc, |
| 1084 | struct kmem_cache *cache) |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1085 | { |
| 1086 | while (mc->nobjs) |
Xiao Guangrong | e8ad9a7 | 2010-05-13 10:06:02 +0800 | [diff] [blame] | 1087 | kmem_cache_free(cache, mc->objects[--mc->nobjs]); |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1088 | } |
| 1089 | |
Avi Kivity | c1158e6 | 2007-07-20 08:18:27 +0300 | [diff] [blame] | 1090 | static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, |
Avi Kivity | 2e3e588 | 2007-09-10 11:28:17 +0300 | [diff] [blame] | 1091 | int min) |
Avi Kivity | c1158e6 | 2007-07-20 08:18:27 +0300 | [diff] [blame] | 1092 | { |
Xiao Guangrong | 842f22e | 2011-03-04 19:01:10 +0800 | [diff] [blame] | 1093 | void *page; |
Avi Kivity | c1158e6 | 2007-07-20 08:18:27 +0300 | [diff] [blame] | 1094 | |
| 1095 | if (cache->nobjs >= min) |
| 1096 | return 0; |
| 1097 | while (cache->nobjs < ARRAY_SIZE(cache->objects)) { |
Shakeel Butt | d97e5e6 | 2018-07-26 16:37:45 -0700 | [diff] [blame] | 1098 | page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT); |
Avi Kivity | c1158e6 | 2007-07-20 08:18:27 +0300 | [diff] [blame] | 1099 | if (!page) |
Wei Yang | daefb79 | 2018-09-04 23:57:32 +0800 | [diff] [blame] | 1100 | return cache->nobjs >= min ? 0 : -ENOMEM; |
Xiao Guangrong | 842f22e | 2011-03-04 19:01:10 +0800 | [diff] [blame] | 1101 | cache->objects[cache->nobjs++] = page; |
Avi Kivity | c1158e6 | 2007-07-20 08:18:27 +0300 | [diff] [blame] | 1102 | } |
| 1103 | return 0; |
| 1104 | } |
| 1105 | |
| 1106 | static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc) |
| 1107 | { |
| 1108 | while (mc->nobjs) |
Avi Kivity | c4d198d | 2007-07-21 09:06:46 +0300 | [diff] [blame] | 1109 | free_page((unsigned long)mc->objects[--mc->nobjs]); |
Avi Kivity | c1158e6 | 2007-07-20 08:18:27 +0300 | [diff] [blame] | 1110 | } |
| 1111 | |
Avi Kivity | 8c43850 | 2007-04-16 11:53:17 +0300 | [diff] [blame] | 1112 | static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) |
| 1113 | { |
| 1114 | int r; |
| 1115 | |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1116 | r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1117 | pte_list_desc_cache, 8 + PTE_PREFETCH_NUM); |
Avi Kivity | 2e3e588 | 2007-09-10 11:28:17 +0300 | [diff] [blame] | 1118 | if (r) |
| 1119 | goto out; |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 1120 | r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); |
Avi Kivity | 2e3e588 | 2007-09-10 11:28:17 +0300 | [diff] [blame] | 1121 | if (r) |
| 1122 | goto out; |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 1123 | r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, |
Avi Kivity | 2e3e588 | 2007-09-10 11:28:17 +0300 | [diff] [blame] | 1124 | mmu_page_header_cache, 4); |
| 1125 | out: |
Avi Kivity | 8c43850 | 2007-04-16 11:53:17 +0300 | [diff] [blame] | 1126 | return r; |
| 1127 | } |
| 1128 | |
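/*
 * Illustrative sketch (editor's addition, not part of mmu.c): caches are
 * topped up while sleeping is still allowed, so that allocations made later
 * under mmu_lock (see mmu_memory_cache_alloc() below) never fail or sleep.
 * The function name and the elided body are hypothetical.
 */
static int example_prepare_for_map(struct kvm_vcpu *vcpu)
{
	int r;

	/* May sleep: must run before mmu_lock is taken. */
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	spin_lock(&vcpu->kvm->mmu_lock);
	/* ... build shadow page table entries from pre-allocated objects ... */
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 0;
}
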
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1129 | static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) |
| 1130 | { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1131 | mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, |
| 1132 | pte_list_desc_cache); |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 1133 | mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); |
Xiao Guangrong | e8ad9a7 | 2010-05-13 10:06:02 +0800 | [diff] [blame] | 1134 | mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, |
| 1135 | mmu_page_header_cache); |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1136 | } |
| 1137 | |
Takuya Yoshikawa | 80feb89 | 2012-05-29 23:54:26 +0900 | [diff] [blame] | 1138 | static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1139 | { |
| 1140 | void *p; |
| 1141 | |
| 1142 | BUG_ON(!mc->nobjs); |
| 1143 | p = mc->objects[--mc->nobjs]; |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1144 | return p; |
| 1145 | } |
| 1146 | |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1147 | static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu) |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1148 | { |
Takuya Yoshikawa | 80feb89 | 2012-05-29 23:54:26 +0900 | [diff] [blame] | 1149 | return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1150 | } |
| 1151 | |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1152 | static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc) |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1153 | { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1154 | kmem_cache_free(pte_list_desc_cache, pte_list_desc); |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1155 | } |
| 1156 | |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1157 | static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) |
| 1158 | { |
| 1159 | if (!sp->role.direct) |
| 1160 | return sp->gfns[index]; |
| 1161 | |
| 1162 | return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); |
| 1163 | } |
| 1164 | |
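/*
 * Editor's note (illustrative, not part of mmu.c): for a direct shadow page
 * with sp->role.level == 2 and sp->gfn == 0x1000, entry index 3 maps
 * gfn 0x1000 + (3 << ((2 - 1) * PT64_LEVEL_BITS)) = 0x1000 + (3 << 9) = 0x1600.
 */
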
| 1165 | static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) |
| 1166 | { |
Paolo Bonzini | e9f2a76 | 2019-06-30 08:36:21 -0400 | [diff] [blame] | 1167 | if (!sp->role.direct) { |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1168 | sp->gfns[index] = gfn; |
Paolo Bonzini | e9f2a76 | 2019-06-30 08:36:21 -0400 | [diff] [blame] | 1169 | return; |
| 1170 | } |
| 1171 | |
| 1172 | if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index))) |
| 1173 | pr_err_ratelimited("gfn mismatch under direct page %llx " |
| 1174 | "(expected %llx, got %llx)\n", |
| 1175 | sp->gfn, |
| 1176 | kvm_mmu_page_get_gfn(sp, index), gfn); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1177 | } |
| 1178 | |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1179 | /* |
Takuya Yoshikawa | d4dbf47 | 2010-12-07 12:59:07 +0900 | [diff] [blame] | 1180 | * Return the pointer to the large page information for a given gfn, |
| 1181 | * handling slots that are not large page aligned. |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1182 | */ |
Takuya Yoshikawa | d4dbf47 | 2010-12-07 12:59:07 +0900 | [diff] [blame] | 1183 | static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, |
| 1184 | struct kvm_memory_slot *slot, |
| 1185 | int level) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1186 | { |
| 1187 | unsigned long idx; |
| 1188 | |
Takuya Yoshikawa | fb03cb6 | 2012-02-08 12:59:10 +0900 | [diff] [blame] | 1189 | idx = gfn_to_index(gfn, slot->base_gfn, level); |
Takuya Yoshikawa | db3fe4e | 2012-02-08 13:02:18 +0900 | [diff] [blame] | 1190 | return &slot->arch.lpage_info[level - 2][idx]; |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1191 | } |
| 1192 | |
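/*
 * Editor's note (illustrative, not part of mmu.c): for the 2MiB level
 * (level == PT_DIRECTORY_LEVEL) in a slot with base_gfn == 0x500,
 * gfn 0x801 gives idx = (0x801 >> 9) - (0x500 >> 9) = 4 - 2 = 2, so the
 * lookup lands on slot->arch.lpage_info[0][2].
 */
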
Xiao Guangrong | 547ffae | 2016-02-24 17:51:07 +0800 | [diff] [blame] | 1193 | static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot, |
| 1194 | gfn_t gfn, int count) |
| 1195 | { |
| 1196 | struct kvm_lpage_info *linfo; |
| 1197 | int i; |
| 1198 | |
| 1199 | for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { |
| 1200 | linfo = lpage_info_slot(gfn, slot, i); |
| 1201 | linfo->disallow_lpage += count; |
| 1202 | WARN_ON(linfo->disallow_lpage < 0); |
| 1203 | } |
| 1204 | } |
| 1205 | |
| 1206 | void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) |
| 1207 | { |
| 1208 | update_gfn_disallow_lpage_count(slot, gfn, 1); |
| 1209 | } |
| 1210 | |
| 1211 | void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) |
| 1212 | { |
| 1213 | update_gfn_disallow_lpage_count(slot, gfn, -1); |
| 1214 | } |
| 1215 | |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1216 | static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1217 | { |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1218 | struct kvm_memslots *slots; |
Joerg Roedel | d25797b | 2009-07-27 16:30:43 +0200 | [diff] [blame] | 1219 | struct kvm_memory_slot *slot; |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1220 | gfn_t gfn; |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1221 | |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 1222 | kvm->arch.indirect_shadow_pages++; |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1223 | gfn = sp->gfn; |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1224 | slots = kvm_memslots_for_spte_role(kvm, sp->role); |
| 1225 | slot = __gfn_to_memslot(slots, gfn); |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 1226 | |
 | 1227 | 	/* Non-leaf shadow pages are kept write-protected. */ |
| 1228 | if (sp->role.level > PT_PAGE_TABLE_LEVEL) |
| 1229 | return kvm_slot_page_track_add_page(kvm, slot, gfn, |
| 1230 | KVM_PAGE_TRACK_WRITE); |
| 1231 | |
Xiao Guangrong | 547ffae | 2016-02-24 17:51:07 +0800 | [diff] [blame] | 1232 | kvm_mmu_gfn_disallow_lpage(slot, gfn); |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1233 | } |
| 1234 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 1235 | static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 1236 | { |
| 1237 | if (sp->lpage_disallowed) |
| 1238 | return; |
| 1239 | |
| 1240 | ++kvm->stat.nx_lpage_splits; |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 1241 | list_add_tail(&sp->lpage_disallowed_link, |
| 1242 | &kvm->arch.lpage_disallowed_mmu_pages); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 1243 | sp->lpage_disallowed = true; |
| 1244 | } |
| 1245 | |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1246 | static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1247 | { |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1248 | struct kvm_memslots *slots; |
Joerg Roedel | d25797b | 2009-07-27 16:30:43 +0200 | [diff] [blame] | 1249 | struct kvm_memory_slot *slot; |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1250 | gfn_t gfn; |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1251 | |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 1252 | kvm->arch.indirect_shadow_pages--; |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1253 | gfn = sp->gfn; |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1254 | slots = kvm_memslots_for_spte_role(kvm, sp->role); |
| 1255 | slot = __gfn_to_memslot(slots, gfn); |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 1256 | if (sp->role.level > PT_PAGE_TABLE_LEVEL) |
| 1257 | return kvm_slot_page_track_remove_page(kvm, slot, gfn, |
| 1258 | KVM_PAGE_TRACK_WRITE); |
| 1259 | |
Xiao Guangrong | 547ffae | 2016-02-24 17:51:07 +0800 | [diff] [blame] | 1260 | kvm_mmu_gfn_allow_lpage(slot, gfn); |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1261 | } |
| 1262 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 1263 | static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 1264 | { |
| 1265 | --kvm->stat.nx_lpage_splits; |
| 1266 | sp->lpage_disallowed = false; |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 1267 | list_del(&sp->lpage_disallowed_link); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 1268 | } |
| 1269 | |
Xiao Guangrong | 5d163b1 | 2011-03-09 15:43:00 +0800 | [diff] [blame] | 1270 | static struct kvm_memory_slot * |
| 1271 | gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 1272 | bool no_dirty_log) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1273 | { |
| 1274 | struct kvm_memory_slot *slot; |
Xiao Guangrong | 5d163b1 | 2011-03-09 15:43:00 +0800 | [diff] [blame] | 1275 | |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 1276 | slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
Paolo Bonzini | 91b0d26 | 2020-01-21 16:16:32 +0100 | [diff] [blame] | 1277 | if (!slot || slot->flags & KVM_MEMSLOT_INVALID) |
| 1278 | return NULL; |
| 1279 | if (no_dirty_log && slot->dirty_bitmap) |
| 1280 | return NULL; |
Xiao Guangrong | 5d163b1 | 2011-03-09 15:43:00 +0800 | [diff] [blame] | 1281 | |
| 1282 | return slot; |
| 1283 | } |
| 1284 | |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1285 | /* |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1286 | * About rmap_head encoding: |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1287 | * |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1288 |  * If bit zero of rmap_head->val is clear, then it points to the only spte |
| 1289 | * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1290 | * pte_list_desc containing more mappings. |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1291 | */ |
| 1292 | |
| 1293 | /* |
| 1294 | * Returns the number of pointers in the rmap chain, not counting the new one. |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1295 | */ |
| 1296 | static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1297 | struct kvm_rmap_head *rmap_head) |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1298 | { |
| 1299 | struct pte_list_desc *desc; |
| 1300 | int i, count = 0; |
| 1301 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1302 | if (!rmap_head->val) { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1303 | rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1304 | rmap_head->val = (unsigned long)spte; |
| 1305 | } else if (!(rmap_head->val & 1)) { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1306 | rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); |
| 1307 | desc = mmu_alloc_pte_list_desc(vcpu); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1308 | desc->sptes[0] = (u64 *)rmap_head->val; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1309 | desc->sptes[1] = spte; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1310 | rmap_head->val = (unsigned long)desc | 1; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1311 | ++count; |
| 1312 | } else { |
| 1313 | rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1314 | desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1315 | while (desc->sptes[PTE_LIST_EXT-1] && desc->more) { |
| 1316 | desc = desc->more; |
| 1317 | count += PTE_LIST_EXT; |
| 1318 | } |
| 1319 | if (desc->sptes[PTE_LIST_EXT-1]) { |
| 1320 | desc->more = mmu_alloc_pte_list_desc(vcpu); |
| 1321 | desc = desc->more; |
| 1322 | } |
| 1323 | for (i = 0; desc->sptes[i]; ++i) |
| 1324 | ++count; |
| 1325 | desc->sptes[i] = spte; |
| 1326 | } |
| 1327 | return count; |
| 1328 | } |
| 1329 | |
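/*
 * Illustrative sketch (editor's addition, not part of mmu.c): decoding
 * rmap_head->val.  Bit zero discriminates between "single spte" and
 * "pte_list_desc chain".  The helper name is hypothetical; rmap_get_first()
 * further down is the real decoder.
 */
static u64 *example_first_spte(struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;

	if (!rmap_head->val)
		return NULL;			/* empty rmap */

	if (!(rmap_head->val & 1))
		return (u64 *)rmap_head->val;	/* exactly one spte */

	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	return desc->sptes[0];			/* first spte of a chain */
}
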
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1330 | static void |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1331 | pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, |
| 1332 | struct pte_list_desc *desc, int i, |
| 1333 | struct pte_list_desc *prev_desc) |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1334 | { |
| 1335 | int j; |
| 1336 | |
| 1337 | for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j) |
| 1338 | ; |
| 1339 | desc->sptes[i] = desc->sptes[j]; |
| 1340 | desc->sptes[j] = NULL; |
| 1341 | if (j != 0) |
| 1342 | return; |
| 1343 | if (!prev_desc && !desc->more) |
Miaohe Lin | fe3c2b4 | 2019-12-05 11:40:16 +0800 | [diff] [blame] | 1344 | rmap_head->val = 0; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1345 | else |
| 1346 | if (prev_desc) |
| 1347 | prev_desc->more = desc->more; |
| 1348 | else |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1349 | rmap_head->val = (unsigned long)desc->more | 1; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1350 | mmu_free_pte_list_desc(desc); |
| 1351 | } |
| 1352 | |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1353 | static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1354 | { |
| 1355 | struct pte_list_desc *desc; |
| 1356 | struct pte_list_desc *prev_desc; |
| 1357 | int i; |
| 1358 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1359 | if (!rmap_head->val) { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1360 | pr_err("%s: %p 0->BUG\n", __func__, spte); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1361 | BUG(); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1362 | } else if (!(rmap_head->val & 1)) { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1363 | rmap_printk("%s: %p 1->0\n", __func__, spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1364 | if ((u64 *)rmap_head->val != spte) { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1365 | pr_err("%s: %p 1->BUG\n", __func__, spte); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1366 | BUG(); |
| 1367 | } |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1368 | rmap_head->val = 0; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1369 | } else { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1370 | rmap_printk("%s: %p many->many\n", __func__, spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1371 | desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1372 | prev_desc = NULL; |
| 1373 | while (desc) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1374 | for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1375 | if (desc->sptes[i] == spte) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1376 | pte_list_desc_remove_entry(rmap_head, |
| 1377 | desc, i, prev_desc); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1378 | return; |
| 1379 | } |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1380 | } |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1381 | prev_desc = desc; |
| 1382 | desc = desc->more; |
| 1383 | } |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1384 | pr_err("%s: %p many->many\n", __func__, spte); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1385 | BUG(); |
| 1386 | } |
| 1387 | } |
| 1388 | |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 1389 | static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep) |
| 1390 | { |
| 1391 | mmu_spte_clear_track_bits(sptep); |
| 1392 | __pte_list_remove(sptep, rmap_head); |
| 1393 | } |
| 1394 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1395 | static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level, |
| 1396 | struct kvm_memory_slot *slot) |
Takuya Yoshikawa | 9b9b149 | 2011-11-14 18:22:28 +0900 | [diff] [blame] | 1397 | { |
Takuya Yoshikawa | 77d1130 | 2012-07-02 17:57:17 +0900 | [diff] [blame] | 1398 | unsigned long idx; |
Takuya Yoshikawa | 9b9b149 | 2011-11-14 18:22:28 +0900 | [diff] [blame] | 1399 | |
Takuya Yoshikawa | 77d1130 | 2012-07-02 17:57:17 +0900 | [diff] [blame] | 1400 | idx = gfn_to_index(gfn, slot->base_gfn, level); |
Takuya Yoshikawa | d89cc61 | 2012-08-01 18:03:28 +0900 | [diff] [blame] | 1401 | return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx]; |
Takuya Yoshikawa | 9b9b149 | 2011-11-14 18:22:28 +0900 | [diff] [blame] | 1402 | } |
| 1403 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1404 | static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, |
| 1405 | struct kvm_mmu_page *sp) |
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 1406 | { |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1407 | struct kvm_memslots *slots; |
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 1408 | struct kvm_memory_slot *slot; |
| 1409 | |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1410 | slots = kvm_memslots_for_spte_role(kvm, sp->role); |
| 1411 | slot = __gfn_to_memslot(slots, gfn); |
Paolo Bonzini | e4cd1da | 2015-05-18 15:11:46 +0200 | [diff] [blame] | 1412 | return __gfn_to_rmap(gfn, sp->role.level, slot); |
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 1413 | } |
| 1414 | |
Xiao Guangrong | f759e2b | 2011-09-22 16:53:17 +0800 | [diff] [blame] | 1415 | static bool rmap_can_add(struct kvm_vcpu *vcpu) |
| 1416 | { |
| 1417 | struct kvm_mmu_memory_cache *cache; |
| 1418 | |
| 1419 | cache = &vcpu->arch.mmu_pte_list_desc_cache; |
| 1420 | return mmu_memory_cache_free_objects(cache); |
| 1421 | } |
| 1422 | |
Joerg Roedel | 44ad994 | 2009-07-27 16:30:42 +0200 | [diff] [blame] | 1423 | static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1424 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1425 | struct kvm_mmu_page *sp; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1426 | struct kvm_rmap_head *rmap_head; |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1427 | |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1428 | sp = page_header(__pa(spte)); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1429 | kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1430 | rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); |
| 1431 | return pte_list_add(vcpu, spte, rmap_head); |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1432 | } |
| 1433 | |
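/*
 * Illustrative sketch (editor's addition, not part of mmu.c): rmap_can_add()
 * guards rmap_add() because pte_list_add() may need a pte_list_desc from the
 * per-vcpu cache.  The helper name is hypothetical.
 */
static void example_map_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn)
{
	/* The cache was topped up before mmu_lock was taken. */
	WARN_ON(!rmap_can_add(vcpu));
	rmap_add(vcpu, sptep, gfn);
}
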
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 1434 | static void rmap_remove(struct kvm *kvm, u64 *spte) |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1435 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1436 | struct kvm_mmu_page *sp; |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1437 | gfn_t gfn; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1438 | struct kvm_rmap_head *rmap_head; |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1439 | |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1440 | sp = page_header(__pa(spte)); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1441 | gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1442 | rmap_head = gfn_to_rmap(kvm, gfn, sp); |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1443 | __pte_list_remove(spte, rmap_head); |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1444 | } |
| 1445 | |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1446 | /* |
| 1447 | * Used by the following functions to iterate through the sptes linked by a |
| 1448 | * rmap. All fields are private and not assumed to be used outside. |
| 1449 | */ |
| 1450 | struct rmap_iterator { |
| 1451 | /* private fields */ |
| 1452 | struct pte_list_desc *desc; /* holds the sptep if not NULL */ |
| 1453 | int pos; /* index of the sptep */ |
| 1454 | }; |
| 1455 | |
| 1456 | /* |
| 1457 | * Iteration must be started by this function. This should also be used after |
| 1458 | * removing/dropping sptes from the rmap link because in such cases the |
Miaohe Lin | 0a03cbd | 2019-12-06 16:20:18 +0800 | [diff] [blame] | 1459 | * information in the iterator may not be valid. |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1460 | * |
| 1461 | * Returns sptep if found, NULL otherwise. |
| 1462 | */ |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1463 | static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head, |
| 1464 | struct rmap_iterator *iter) |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1465 | { |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1466 | u64 *sptep; |
| 1467 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1468 | if (!rmap_head->val) |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1469 | return NULL; |
| 1470 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1471 | if (!(rmap_head->val & 1)) { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1472 | iter->desc = NULL; |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1473 | sptep = (u64 *)rmap_head->val; |
| 1474 | goto out; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1475 | } |
| 1476 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1477 | iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1478 | iter->pos = 0; |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1479 | sptep = iter->desc->sptes[iter->pos]; |
| 1480 | out: |
| 1481 | BUG_ON(!is_shadow_present_pte(*sptep)); |
| 1482 | return sptep; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1483 | } |
| 1484 | |
| 1485 | /* |
| 1486 | * Must be used with a valid iterator: e.g. after rmap_get_first(). |
| 1487 | * |
| 1488 | * Returns sptep if found, NULL otherwise. |
| 1489 | */ |
| 1490 | static u64 *rmap_get_next(struct rmap_iterator *iter) |
| 1491 | { |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1492 | u64 *sptep; |
| 1493 | |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1494 | if (iter->desc) { |
| 1495 | if (iter->pos < PTE_LIST_EXT - 1) { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1496 | ++iter->pos; |
| 1497 | sptep = iter->desc->sptes[iter->pos]; |
| 1498 | if (sptep) |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1499 | goto out; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1500 | } |
| 1501 | |
| 1502 | iter->desc = iter->desc->more; |
| 1503 | |
| 1504 | if (iter->desc) { |
| 1505 | iter->pos = 0; |
| 1506 | /* desc->sptes[0] cannot be NULL */ |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1507 | sptep = iter->desc->sptes[iter->pos]; |
| 1508 | goto out; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1509 | } |
| 1510 | } |
| 1511 | |
| 1512 | return NULL; |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1513 | out: |
| 1514 | BUG_ON(!is_shadow_present_pte(*sptep)); |
| 1515 | return sptep; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1516 | } |
| 1517 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1518 | #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \ |
| 1519 | for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \ |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1520 | _spte_; _spte_ = rmap_get_next(_iter_)) |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1521 | |
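/*
 * Illustrative sketch (editor's addition, not part of mmu.c): a typical rmap
 * walk with the iterator above.  The function name is hypothetical; the
 * write-protect and dirty-clearing walkers below follow the same pattern.
 */
static int example_count_writable(struct kvm_rmap_head *rmap_head)
{
	struct rmap_iterator iter;
	u64 *sptep;
	int count = 0;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (is_writable_pte(*sptep))
			count++;

	return count;
}
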
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 1522 | static void drop_spte(struct kvm *kvm, u64 *sptep) |
Xiao Guangrong | e4b502e | 2010-07-16 11:28:09 +0800 | [diff] [blame] | 1523 | { |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 1524 | if (mmu_spte_clear_track_bits(sptep)) |
Marcelo Tosatti | eb45fda | 2010-10-25 11:58:22 -0200 | [diff] [blame] | 1525 | rmap_remove(kvm, sptep); |
Avi Kivity | be38d27 | 2010-06-06 14:31:27 +0300 | [diff] [blame] | 1526 | } |
| 1527 | |
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1528 | |
| 1529 | static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) |
| 1530 | { |
| 1531 | if (is_large_pte(*sptep)) { |
| 1532 | WARN_ON(page_header(__pa(sptep))->role.level == |
| 1533 | PT_PAGE_TABLE_LEVEL); |
| 1534 | drop_spte(kvm, sptep); |
| 1535 | --kvm->stat.lpages; |
| 1536 | return true; |
| 1537 | } |
| 1538 | |
| 1539 | return false; |
| 1540 | } |
| 1541 | |
| 1542 | static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) |
| 1543 | { |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 1544 | if (__drop_large_spte(vcpu->kvm, sptep)) { |
| 1545 | struct kvm_mmu_page *sp = page_header(__pa(sptep)); |
| 1546 | |
| 1547 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, |
| 1548 | KVM_PAGES_PER_HPAGE(sp->role.level)); |
| 1549 | } |
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1550 | } |
| 1551 | |
| 1552 | /* |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1553 |  * Write-protect the specified @sptep; @pt_protect indicates whether the |
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 1554 |  * write-protection is done to protect a shadow page table. |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1555 |  * |
Tiejun Chen | b461966 | 2014-09-22 10:31:38 +0800 | [diff] [blame] | 1556 |  * Note: write protection differs between dirty logging and spte |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1557 |  * protection: |
 | 1558 |  * - for dirty logging, the spte can be made writable again at any time if |
 | 1559 |  *   its dirty bitmap is properly set. |
 | 1560 |  * - for spte protection, the spte can be made writable only after the |
 | 1561 |  *   shadow page is unsynced. |
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1562 |  * |
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 1563 |  * Return true if the TLB needs to be flushed. |
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1564 | */ |
Bandan Das | c4f138b | 2016-08-02 16:32:37 -0400 | [diff] [blame] | 1565 | static bool spte_write_protect(u64 *sptep, bool pt_protect) |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1566 | { |
| 1567 | u64 spte = *sptep; |
| 1568 | |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1569 | if (!is_writable_pte(spte) && |
Junaid Shahid | ea4114b | 2016-12-06 16:46:11 -0800 | [diff] [blame] | 1570 | !(pt_protect && spte_can_locklessly_be_made_writable(spte))) |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1571 | return false; |
| 1572 | |
| 1573 | rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); |
| 1574 | |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1575 | if (pt_protect) |
| 1576 | spte &= ~SPTE_MMU_WRITEABLE; |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1577 | spte = spte & ~PT_WRITABLE_MASK; |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1578 | |
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 1579 | return mmu_spte_update(sptep, spte); |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1580 | } |
| 1581 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1582 | static bool __rmap_write_protect(struct kvm *kvm, |
| 1583 | struct kvm_rmap_head *rmap_head, |
Takuya Yoshikawa | 245c391 | 2013-01-08 19:44:09 +0900 | [diff] [blame] | 1584 | bool pt_protect) |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1585 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1586 | u64 *sptep; |
| 1587 | struct rmap_iterator iter; |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1588 | bool flush = false; |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1589 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1590 | for_each_rmap_spte(rmap_head, &iter, sptep) |
Bandan Das | c4f138b | 2016-08-02 16:32:37 -0400 | [diff] [blame] | 1591 | flush |= spte_write_protect(sptep, pt_protect); |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1592 | |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1593 | return flush; |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1594 | } |
| 1595 | |
Bandan Das | c4f138b | 2016-08-02 16:32:37 -0400 | [diff] [blame] | 1596 | static bool spte_clear_dirty(u64 *sptep) |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1597 | { |
| 1598 | u64 spte = *sptep; |
| 1599 | |
| 1600 | rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); |
| 1601 | |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1602 | MMU_WARN_ON(!spte_ad_enabled(spte)); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1603 | spte &= ~shadow_dirty_mask; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1604 | return mmu_spte_update(sptep, spte); |
| 1605 | } |
| 1606 | |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1607 | static bool spte_wrprot_for_clear_dirty(u64 *sptep) |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1608 | { |
| 1609 | bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT, |
| 1610 | (unsigned long *)sptep); |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1611 | if (was_writable && !spte_ad_enabled(*sptep)) |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1612 | kvm_set_pfn_dirty(spte_to_pfn(*sptep)); |
| 1613 | |
| 1614 | return was_writable; |
| 1615 | } |
| 1616 | |
| 1617 | /* |
| 1618 | * Gets the GFN ready for another round of dirty logging by clearing the |
| 1619 | * - D bit on ad-enabled SPTEs, and |
| 1620 | * - W bit on ad-disabled SPTEs. |
| 1621 | * Returns true iff any D or W bits were cleared. |
| 1622 | */ |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1623 | static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1624 | { |
| 1625 | u64 *sptep; |
| 1626 | struct rmap_iterator iter; |
| 1627 | bool flush = false; |
| 1628 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1629 | for_each_rmap_spte(rmap_head, &iter, sptep) |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1630 | if (spte_ad_need_write_protect(*sptep)) |
| 1631 | flush |= spte_wrprot_for_clear_dirty(sptep); |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1632 | else |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1633 | flush |= spte_clear_dirty(sptep); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1634 | |
| 1635 | return flush; |
| 1636 | } |
| 1637 | |
Bandan Das | c4f138b | 2016-08-02 16:32:37 -0400 | [diff] [blame] | 1638 | static bool spte_set_dirty(u64 *sptep) |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1639 | { |
| 1640 | u64 spte = *sptep; |
| 1641 | |
| 1642 | rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); |
| 1643 | |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1644 | /* |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 1645 | * Similar to the !kvm_x86_ops.slot_disable_log_dirty case, |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1646 | * do not bother adding back write access to pages marked |
| 1647 | * SPTE_AD_WRPROT_ONLY_MASK. |
| 1648 | */ |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1649 | spte |= shadow_dirty_mask; |
| 1650 | |
| 1651 | return mmu_spte_update(sptep, spte); |
| 1652 | } |
| 1653 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1654 | static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1655 | { |
| 1656 | u64 *sptep; |
| 1657 | struct rmap_iterator iter; |
| 1658 | bool flush = false; |
| 1659 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1660 | for_each_rmap_spte(rmap_head, &iter, sptep) |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1661 | if (spte_ad_enabled(*sptep)) |
| 1662 | flush |= spte_set_dirty(sptep); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1663 | |
| 1664 | return flush; |
| 1665 | } |
| 1666 | |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1667 | /** |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1668 | * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1669 | * @kvm: kvm instance |
| 1670 | * @slot: slot to protect |
| 1671 | * @gfn_offset: start of the BITS_PER_LONG pages we care about |
| 1672 | * @mask: indicates which pages we should protect |
| 1673 | * |
| 1674 | * Used when we do not need to care about huge page mappings: e.g. during dirty |
| 1675 | * logging we do not have any such mappings. |
| 1676 | */ |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1677 | static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1678 | struct kvm_memory_slot *slot, |
| 1679 | gfn_t gfn_offset, unsigned long mask) |
Izik Eidus | 98348e9 | 2007-10-16 14:42:30 +0200 | [diff] [blame] | 1680 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1681 | struct kvm_rmap_head *rmap_head; |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1682 | |
| 1683 | while (mask) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1684 | rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), |
| 1685 | PT_PAGE_TABLE_LEVEL, slot); |
| 1686 | __rmap_write_protect(kvm, rmap_head, false); |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1687 | |
 | 1688 | 		/* clear the lowest set bit */ |
| 1689 | mask &= mask - 1; |
| 1690 | } |
| 1691 | } |
| 1692 | |
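/*
 * Editor's note (illustrative, not part of mmu.c): with gfn_offset == 64 and
 * mask == 0b1010, the loop above write-protects the rmap chains of
 * slot->base_gfn + 64 + 1 and slot->base_gfn + 64 + 3, i.e. exactly the
 * pages whose mask bits are set, one bit at a time.
 */
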
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1693 | /** |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1694 | * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write |
| 1695 | * protect the page if the D-bit isn't supported. |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1696 | * @kvm: kvm instance |
| 1697 | * @slot: slot to clear D-bit |
| 1698 | * @gfn_offset: start of the BITS_PER_LONG pages we care about |
| 1699 | * @mask: indicates which pages we should clear D-bit |
| 1700 | * |
| 1701 | * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap. |
| 1702 | */ |
| 1703 | void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, |
| 1704 | struct kvm_memory_slot *slot, |
| 1705 | gfn_t gfn_offset, unsigned long mask) |
| 1706 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1707 | struct kvm_rmap_head *rmap_head; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1708 | |
| 1709 | while (mask) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1710 | rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), |
| 1711 | PT_PAGE_TABLE_LEVEL, slot); |
| 1712 | __rmap_clear_dirty(kvm, rmap_head); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1713 | |
 | 1714 | 		/* clear the lowest set bit */ |
| 1715 | mask &= mask - 1; |
| 1716 | } |
| 1717 | } |
| 1718 | EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked); |
| 1719 | |
| 1720 | /** |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1721 | * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected |
| 1722 | * PT level pages. |
| 1723 | * |
| 1724 | * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to |
| 1725 | * enable dirty logging for them. |
| 1726 | * |
| 1727 | * Used when we do not need to care about huge page mappings: e.g. during dirty |
| 1728 | * logging we do not have any such mappings. |
| 1729 | */ |
| 1730 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, |
| 1731 | struct kvm_memory_slot *slot, |
| 1732 | gfn_t gfn_offset, unsigned long mask) |
| 1733 | { |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 1734 | if (kvm_x86_ops.enable_log_dirty_pt_masked) |
| 1735 | kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset, |
Kai Huang | 88178fd | 2015-01-28 10:54:27 +0800 | [diff] [blame] | 1736 | mask); |
| 1737 | else |
| 1738 | kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1739 | } |
| 1740 | |
Bandan Das | bab4165 | 2017-05-05 15:25:13 -0400 | [diff] [blame] | 1741 | /** |
| 1742 | * kvm_arch_write_log_dirty - emulate dirty page logging |
| 1743 | * @vcpu: Guest mode vcpu |
| 1744 | * |
| 1745 | * Emulate arch specific page modification logging for the |
| 1746 | * nested hypervisor |
| 1747 | */ |
| 1748 | int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu) |
| 1749 | { |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 1750 | if (kvm_x86_ops.write_log_dirty) |
| 1751 | return kvm_x86_ops.write_log_dirty(vcpu); |
Bandan Das | bab4165 | 2017-05-05 15:25:13 -0400 | [diff] [blame] | 1752 | |
| 1753 | return 0; |
| 1754 | } |
| 1755 | |
Xiao Guangrong | aeecee2 | 2016-02-24 17:51:08 +0800 | [diff] [blame] | 1756 | bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, |
| 1757 | struct kvm_memory_slot *slot, u64 gfn) |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1758 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1759 | struct kvm_rmap_head *rmap_head; |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1760 | int i; |
Xiao Guangrong | 2f84569 | 2012-06-20 15:56:53 +0800 | [diff] [blame] | 1761 | bool write_protected = false; |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1762 | |
Xiao Guangrong | 8a3d08f | 2015-05-13 14:42:21 +0800 | [diff] [blame] | 1763 | for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1764 | rmap_head = __gfn_to_rmap(gfn, i, slot); |
Xiao Guangrong | aeecee2 | 2016-02-24 17:51:08 +0800 | [diff] [blame] | 1765 | write_protected |= __rmap_write_protect(kvm, rmap_head, true); |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1766 | } |
| 1767 | |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 1768 | return write_protected; |
Avi Kivity | 374cbac | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1769 | } |
| 1770 | |
Xiao Guangrong | aeecee2 | 2016-02-24 17:51:08 +0800 | [diff] [blame] | 1771 | static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) |
| 1772 | { |
| 1773 | struct kvm_memory_slot *slot; |
| 1774 | |
| 1775 | slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 1776 | return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn); |
| 1777 | } |
| 1778 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1779 | static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head) |
Xiao Guangrong | 6a49f85 | 2015-05-13 14:42:25 +0800 | [diff] [blame] | 1780 | { |
| 1781 | u64 *sptep; |
| 1782 | struct rmap_iterator iter; |
| 1783 | bool flush = false; |
| 1784 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1785 | while ((sptep = rmap_get_first(rmap_head, &iter))) { |
Xiao Guangrong | 6a49f85 | 2015-05-13 14:42:25 +0800 | [diff] [blame] | 1786 | rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); |
| 1787 | |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 1788 | pte_list_remove(rmap_head, sptep); |
Xiao Guangrong | 6a49f85 | 2015-05-13 14:42:25 +0800 | [diff] [blame] | 1789 | flush = true; |
| 1790 | } |
| 1791 | |
| 1792 | return flush; |
| 1793 | } |
| 1794 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1795 | static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1796 | struct kvm_memory_slot *slot, gfn_t gfn, int level, |
| 1797 | unsigned long data) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1798 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1799 | return kvm_zap_rmapp(kvm, rmap_head); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1800 | } |
| 1801 | |
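| | /* |
| | * Handler for a change of the host PTE backing a gfn, reached via |
| | * kvm_set_spte_hva() below.  If the new host pte is writable the spte is |
| | * simply dropped so it is rebuilt on the next fault; otherwise the spte is |
| | * rewritten to point at the new pfn with write access removed, and a TLB |
| | * flush is requested if anything changed. |
| | */ |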
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1802 | static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1803 | struct kvm_memory_slot *slot, gfn_t gfn, int level, |
| 1804 | unsigned long data) |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1805 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1806 | u64 *sptep; |
| 1807 | struct rmap_iterator iter; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1808 | int need_flush = 0; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1809 | u64 new_spte; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1810 | pte_t *ptep = (pte_t *)data; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 1811 | kvm_pfn_t new_pfn; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1812 | |
| 1813 | WARN_ON(pte_huge(*ptep)); |
| 1814 | new_pfn = pte_pfn(*ptep); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1815 | |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1816 | restart: |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1817 | for_each_rmap_spte(rmap_head, &iter, sptep) { |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1818 | rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n", |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1819 | sptep, *sptep, gfn, level); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1820 | |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1821 | need_flush = 1; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1822 | |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1823 | if (pte_write(*ptep)) { |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 1824 | pte_list_remove(rmap_head, sptep); |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1825 | goto restart; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1826 | } else { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1827 | new_spte = *sptep & ~PT64_BASE_ADDR_MASK; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1828 | new_spte |= (u64)new_pfn << PAGE_SHIFT; |
| 1829 | |
| 1830 | new_spte &= ~PT_WRITABLE_MASK; |
| 1831 | new_spte &= ~SPTE_HOST_WRITEABLE; |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1832 | |
| 1833 | new_spte = mark_spte_for_access_track(new_spte); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1834 | |
| 1835 | mmu_spte_clear_track_bits(sptep); |
| 1836 | mmu_spte_set(sptep, new_spte); |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1837 | } |
| 1838 | } |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1839 | |
Lan Tianyu | 3cc5ea9 | 2018-12-06 21:21:12 +0800 | [diff] [blame] | 1840 | if (need_flush && kvm_available_flush_tlb_with_range()) { |
| 1841 | kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); |
| 1842 | return 0; |
| 1843 | } |
| 1844 | |
Lan Tianyu | 0cf853c | 2018-12-06 21:21:11 +0800 | [diff] [blame] | 1845 | return need_flush; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1846 | } |
| 1847 | |
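| | /* |
| | * Iterator state for visiting every rmap list in a memslot that covers the |
| | * gfn range [start_gfn, end_gfn] at each page-table level in [start_level, |
| | * end_level].  The output fields hold the gfn, rmap head and level of the |
| | * current position of the walk. |
| | */ |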
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1848 | struct slot_rmap_walk_iterator { |
| 1849 | /* input fields. */ |
| 1850 | struct kvm_memory_slot *slot; |
| 1851 | gfn_t start_gfn; |
| 1852 | gfn_t end_gfn; |
| 1853 | int start_level; |
| 1854 | int end_level; |
| 1855 | |
| 1856 | /* output fields. */ |
| 1857 | gfn_t gfn; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1858 | struct kvm_rmap_head *rmap; |
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1859 | int level; |
| 1860 | |
| 1861 | /* private field. */ |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1862 | struct kvm_rmap_head *end_rmap; |
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1863 | }; |
| 1864 | |
| 1865 | static void |
| 1866 | rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level) |
| 1867 | { |
| 1868 | iterator->level = level; |
| 1869 | iterator->gfn = iterator->start_gfn; |
| 1870 | iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot); |
| 1871 | iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level, |
| 1872 | iterator->slot); |
| 1873 | } |
| 1874 | |
| 1875 | static void |
| 1876 | slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator, |
| 1877 | struct kvm_memory_slot *slot, int start_level, |
| 1878 | int end_level, gfn_t start_gfn, gfn_t end_gfn) |
| 1879 | { |
| 1880 | iterator->slot = slot; |
| 1881 | iterator->start_level = start_level; |
| 1882 | iterator->end_level = end_level; |
| 1883 | iterator->start_gfn = start_gfn; |
| 1884 | iterator->end_gfn = end_gfn; |
| 1885 | |
| 1886 | rmap_walk_init_level(iterator, iterator->start_level); |
| 1887 | } |
| 1888 | |
| 1889 | static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator) |
| 1890 | { |
| 1891 | return !!iterator->rmap; |
| 1892 | } |
| 1893 | |
| 1894 | static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) |
| 1895 | { |
| 1896 | if (++iterator->rmap <= iterator->end_rmap) { |
| 1897 | iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level)); |
| 1898 | return; |
| 1899 | } |
| 1900 | |
| 1901 | if (++iterator->level > iterator->end_level) { |
| 1902 | iterator->rmap = NULL; |
| 1903 | return; |
| 1904 | } |
| 1905 | |
| 1906 | rmap_walk_init_level(iterator, iterator->level); |
| 1907 | } |
| 1908 | |
| 1909 | #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \ |
| 1910 | _start_gfn, _end_gfn, _iter_) \ |
| 1911 | for (slot_rmap_walk_init(_iter_, _slot_, _start_level_, \ |
| 1912 | _end_level_, _start_gfn, _end_gfn); \ |
| 1913 | slot_rmap_walk_okay(_iter_); \ |
| 1914 | slot_rmap_walk_next(_iter_)) |
| 1915 | |
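| | /* |
| | * Apply @handler to every rmap list mapping a page that intersects the host |
| | * virtual address range [@start, @end), across all address spaces and |
| | * memslots, OR-ing together the handlers' return values. |
| | */ |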
Takuya Yoshikawa | 84504ef | 2012-07-02 17:55:48 +0900 | [diff] [blame] | 1916 | static int kvm_handle_hva_range(struct kvm *kvm, |
| 1917 | unsigned long start, |
| 1918 | unsigned long end, |
| 1919 | unsigned long data, |
| 1920 | int (*handler)(struct kvm *kvm, |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1921 | struct kvm_rmap_head *rmap_head, |
Takuya Yoshikawa | 048212d | 2012-07-02 17:57:59 +0900 | [diff] [blame] | 1922 | struct kvm_memory_slot *slot, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1923 | gfn_t gfn, |
| 1924 | int level, |
Takuya Yoshikawa | 84504ef | 2012-07-02 17:55:48 +0900 | [diff] [blame] | 1925 | unsigned long data)) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1926 | { |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 1927 | struct kvm_memslots *slots; |
Xiao Guangrong | be6ba0f | 2011-11-24 17:39:18 +0800 | [diff] [blame] | 1928 | struct kvm_memory_slot *memslot; |
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1929 | struct slot_rmap_walk_iterator iterator; |
| 1930 | int ret = 0; |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 1931 | int i; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1932 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 1933 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
| 1934 | slots = __kvm_memslots(kvm, i); |
| 1935 | kvm_for_each_memslot(memslot, slots) { |
| 1936 | unsigned long hva_start, hva_end; |
| 1937 | gfn_t gfn_start, gfn_end; |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 1938 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 1939 | hva_start = max(start, memslot->userspace_addr); |
| 1940 | hva_end = min(end, memslot->userspace_addr + |
| 1941 | (memslot->npages << PAGE_SHIFT)); |
| 1942 | if (hva_start >= hva_end) |
| 1943 | continue; |
| 1944 | /* |
| 1945 | * {gfn(page) | page intersects with [hva_start, hva_end)} = |
| 1946 | * {gfn_start, gfn_start+1, ..., gfn_end-1}. |
| 1947 | */ |
| 1948 | gfn_start = hva_to_gfn_memslot(hva_start, memslot); |
| 1949 | gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1950 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 1951 | for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL, |
| 1952 | PT_MAX_HUGEPAGE_LEVEL, |
| 1953 | gfn_start, gfn_end - 1, |
| 1954 | &iterator) |
| 1955 | ret |= handler(kvm, iterator.rmap, memslot, |
| 1956 | iterator.gfn, iterator.level, data); |
| 1957 | } |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1958 | } |
| 1959 | |
Takuya Yoshikawa | f395302 | 2012-07-02 17:58:48 +0900 | [diff] [blame] | 1960 | return ret; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1961 | } |
| 1962 | |
Takuya Yoshikawa | 84504ef | 2012-07-02 17:55:48 +0900 | [diff] [blame] | 1963 | static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, |
| 1964 | unsigned long data, |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1965 | int (*handler)(struct kvm *kvm, |
| 1966 | struct kvm_rmap_head *rmap_head, |
Takuya Yoshikawa | 048212d | 2012-07-02 17:57:59 +0900 | [diff] [blame] | 1967 | struct kvm_memory_slot *slot, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1968 | gfn_t gfn, int level, |
Takuya Yoshikawa | 84504ef | 2012-07-02 17:55:48 +0900 | [diff] [blame] | 1969 | unsigned long data)) |
| 1970 | { |
| 1971 | return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1972 | } |
| 1973 | |
Takuya Yoshikawa | b3ae209 | 2012-07-02 17:56:33 +0900 | [diff] [blame] | 1974 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) |
| 1975 | { |
| 1976 | return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); |
| 1977 | } |
| 1978 | |
Lan Tianyu | 748c0e3 | 2018-12-06 21:21:10 +0800 | [diff] [blame] | 1979 | int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1980 | { |
Lan Tianyu | 0cf853c | 2018-12-06 21:21:11 +0800 | [diff] [blame] | 1981 | return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1982 | } |
| 1983 | |
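| | /* |
| | * Aging handler, reached via kvm_age_hva() below: age every spte in the |
| | * rmap with mmu_spte_age() and report whether any of them had been |
| | * accessed. |
| | */ |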
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1984 | static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1985 | struct kvm_memory_slot *slot, gfn_t gfn, int level, |
| 1986 | unsigned long data) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1987 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1988 | u64 *sptep; |
Michael S. Tsirkin | 79f702a | 2012-06-03 11:34:08 +0300 | [diff] [blame] | 1989 | struct rmap_iterator uninitialized_var(iter); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1990 | int young = 0; |
| 1991 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1992 | for_each_rmap_spte(rmap_head, &iter, sptep) |
| 1993 | young |= mmu_spte_age(sptep); |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1994 | |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1995 | trace_kvm_age_page(gfn, level, slot, young); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1996 | return young; |
| 1997 | } |
| 1998 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1999 | static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 2000 | struct kvm_memory_slot *slot, gfn_t gfn, |
| 2001 | int level, unsigned long data) |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 2002 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 2003 | u64 *sptep; |
| 2004 | struct rmap_iterator iter; |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 2005 | |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 2006 | for_each_rmap_spte(rmap_head, &iter, sptep) |
| 2007 | if (is_accessed_spte(*sptep)) |
| 2008 | return 1; |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 2009 | return 0; |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 2010 | } |
| 2011 | |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 2012 | #define RMAP_RECYCLE_THRESHOLD 1000 |
| 2013 | |
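| | /* |
| | * Zap every spte currently mapping @gfn and flush the TLBs for the affected |
| | * range; used to keep an rmap list from growing without bound once it |
| | * exceeds RMAP_RECYCLE_THRESHOLD entries. |
| | */ |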
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 2014 | static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 2015 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 2016 | struct kvm_rmap_head *rmap_head; |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 2017 | struct kvm_mmu_page *sp; |
| 2018 | |
| 2019 | sp = page_header(__pa(spte)); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 2020 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 2021 | rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 2022 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 2023 | kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0); |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 2024 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, |
| 2025 | KVM_PAGES_PER_HPAGE(sp->role.level)); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 2026 | } |
| 2027 | |
Andres Lagar-Cavilla | 5712846 | 2014-09-22 14:54:42 -0700 | [diff] [blame] | 2028 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 2029 | { |
Andres Lagar-Cavilla | 5712846 | 2014-09-22 14:54:42 -0700 | [diff] [blame] | 2030 | return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 2031 | } |
| 2032 | |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 2033 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) |
| 2034 | { |
| 2035 | return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); |
| 2036 | } |
| 2037 | |
Yaozu Dong | d6c69ee | 2007-04-25 14:17:25 +0800 | [diff] [blame] | 2038 | #ifdef MMU_DEBUG |
Avi Kivity | 47ad8e6 | 2007-05-06 15:50:58 +0300 | [diff] [blame] | 2039 | static int is_empty_shadow_page(u64 *spt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2040 | { |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 2041 | u64 *pos; |
| 2042 | u64 *end; |
| 2043 | |
Avi Kivity | 47ad8e6 | 2007-05-06 15:50:58 +0300 | [diff] [blame] | 2044 | for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) |
Avi Kivity | 3c91551 | 2008-05-20 16:21:13 +0300 | [diff] [blame] | 2045 | if (is_shadow_present_pte(*pos)) { |
Harvey Harrison | b8688d5 | 2008-03-03 12:59:56 -0800 | [diff] [blame] | 2046 | printk(KERN_ERR "%s: %p %llx\n", __func__, |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 2047 | pos, *pos); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2048 | return 0; |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 2049 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2050 | return 1; |
| 2051 | } |
Yaozu Dong | d6c69ee | 2007-04-25 14:17:25 +0800 | [diff] [blame] | 2052 | #endif |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2053 | |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 2054 | /* |
| 2055 | * This value is the sum of all of the kvm instances' |
| 2056 | * kvm->arch.n_used_mmu_pages values. We need a global, |
| 2057 | * aggregate version in order to make the slab shrinker |
| 2058 | * faster. |
| 2059 | */ |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 2060 | static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr) |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 2061 | { |
| 2062 | kvm->arch.n_used_mmu_pages += nr; |
| 2063 | percpu_counter_add(&kvm_total_used_mmu_pages, nr); |
| 2064 | } |
| 2065 | |
Gleb Natapov | 834be0d | 2013-01-30 16:45:05 +0200 | [diff] [blame] | 2066 | static void kvm_mmu_free_page(struct kvm_mmu_page *sp) |
Avi Kivity | 260746c | 2007-01-05 16:36:49 -0800 | [diff] [blame] | 2067 | { |
Paolo Bonzini | fa4a2c0 | 2013-10-02 16:56:16 +0200 | [diff] [blame] | 2068 | MMU_WARN_ON(!is_empty_shadow_page(sp->spt)); |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2069 | hlist_del(&sp->hash_link); |
Xiao Guangrong | bd4c86e | 2011-07-12 03:27:14 +0800 | [diff] [blame] | 2070 | list_del(&sp->link); |
| 2071 | free_page((unsigned long)sp->spt); |
Gleb Natapov | 834be0d | 2013-01-30 16:45:05 +0200 | [diff] [blame] | 2072 | if (!sp->role.direct) |
| 2073 | free_page((unsigned long)sp->gfns); |
Xiao Guangrong | e8ad9a7 | 2010-05-13 10:06:02 +0800 | [diff] [blame] | 2074 | kmem_cache_free(mmu_page_header_cache, sp); |
Avi Kivity | 260746c | 2007-01-05 16:36:49 -0800 | [diff] [blame] | 2075 | } |
| 2076 | |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2077 | static unsigned kvm_page_table_hashfn(gfn_t gfn) |
| 2078 | { |
David Matlack | 114df30 | 2016-12-19 13:58:25 -0800 | [diff] [blame] | 2079 | return hash_64(gfn, KVM_MMU_HASH_SHIFT); |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2080 | } |
| 2081 | |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 2082 | static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, |
| 2083 | struct kvm_mmu_page *sp, u64 *parent_pte) |
| 2084 | { |
| 2085 | if (!parent_pte) |
| 2086 | return; |
| 2087 | |
| 2088 | pte_list_add(vcpu, parent_pte, &sp->parent_ptes); |
| 2089 | } |
| 2090 | |
| 2091 | static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, |
| 2092 | u64 *parent_pte) |
| 2093 | { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 2094 | __pte_list_remove(parent_pte, &sp->parent_ptes); |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 2095 | } |
| 2096 | |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2097 | static void drop_parent_pte(struct kvm_mmu_page *sp, |
| 2098 | u64 *parent_pte) |
| 2099 | { |
| 2100 | mmu_page_remove_parent_pte(sp, parent_pte); |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 2101 | mmu_spte_clear_no_track(parent_pte); |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2102 | } |
| 2103 | |
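| | /* |
| | * Allocate a new shadow page, its spt page and (for indirect pages only) |
| | * its gfns array from the per-vcpu memory caches. |
| | */ |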
Takuya Yoshikawa | 4700579 | 2015-11-20 17:46:29 +0900 | [diff] [blame] | 2104 | static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2105 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2106 | struct kvm_mmu_page *sp; |
Takuya Yoshikawa | 7ddca7e | 2013-03-21 19:33:43 +0900 | [diff] [blame] | 2107 | |
Takuya Yoshikawa | 80feb89 | 2012-05-29 23:54:26 +0900 | [diff] [blame] | 2108 | sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); |
| 2109 | sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 2110 | if (!direct) |
Takuya Yoshikawa | 80feb89 | 2012-05-29 23:54:26 +0900 | [diff] [blame] | 2111 | sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2112 | set_page_private(virt_to_page(sp->spt), (unsigned long)sp); |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 2113 | |
| 2114 | /* |
| 2115 | * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages() |
| 2116 | * depends on valid pages being added to the head of the list. See |
| 2117 | * comments in kvm_zap_obsolete_pages(). |
| 2118 | */ |
Sean Christopherson | ca333ad | 2019-09-12 19:46:11 -0700 | [diff] [blame] | 2119 | sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; |
Zhang Xiantao | f05e70a | 2007-12-14 10:01:48 +0800 | [diff] [blame] | 2120 | list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 2121 | kvm_mod_used_mmu_pages(vcpu->kvm, +1); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2122 | return sp; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2123 | } |
| 2124 | |
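| | /* |
| | * Propagate a page's unsync state up through its parent sptes: each parent |
| | * page records which of its entries point at unsync children in |
| | * unsync_child_bitmap and counts them in unsync_children, recursing upwards |
| | * until an already-marked page is reached. |
| | */ |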
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 2125 | static void mark_unsync(u64 *spte); |
Xiao Guangrong | 6b18493 | 2010-04-16 21:29:17 +0800 | [diff] [blame] | 2126 | static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) |
Marcelo Tosatti | 0074ff6 | 2008-09-23 13:18:40 -0300 | [diff] [blame] | 2127 | { |
Takuya Yoshikawa | 74c4e63 | 2015-11-26 21:15:38 +0900 | [diff] [blame] | 2128 | u64 *sptep; |
| 2129 | struct rmap_iterator iter; |
| 2130 | |
| 2131 | for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) { |
| 2132 | mark_unsync(sptep); |
| 2133 | } |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 2134 | } |
| 2135 | |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 2136 | static void mark_unsync(u64 *spte) |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 2137 | { |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 2138 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 2139 | unsigned int index; |
| 2140 | |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 2141 | sp = page_header(__pa(spte)); |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 2142 | index = spte - sp->spt; |
| 2143 | if (__test_and_set_bit(index, sp->unsync_child_bitmap)) |
| 2144 | return; |
| 2145 | if (sp->unsync_children++) |
| 2146 | return; |
| 2147 | kvm_mmu_mark_parents_unsync(sp); |
Marcelo Tosatti | 0074ff6 | 2008-09-23 13:18:40 -0300 | [diff] [blame] | 2148 | } |
| 2149 | |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 2150 | static int nonpaging_sync_page(struct kvm_vcpu *vcpu, |
Xiao Guangrong | a4a8e6f | 2010-11-19 17:04:03 +0800 | [diff] [blame] | 2151 | struct kvm_mmu_page *sp) |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 2152 | { |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2153 | return 0; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 2154 | } |
| 2155 | |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 2156 | static void nonpaging_update_pte(struct kvm_vcpu *vcpu, |
| 2157 | struct kvm_mmu_page *sp, u64 *spte, |
Xiao Guangrong | 7c56252 | 2011-03-28 10:29:27 +0800 | [diff] [blame] | 2158 | const void *pte) |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 2159 | { |
| 2160 | WARN_ON(1); |
| 2161 | } |
| 2162 | |
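| | /* |
| | * A small batch of shadow pages gathered by mmu_unsync_walk(); each entry |
| | * records a page together with its index in the parent's page table so the |
| | * unsync child bits can be cleared once the page has been synced. |
| | */ |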
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2163 | #define KVM_PAGE_ARRAY_NR 16 |
| 2164 | |
| 2165 | struct kvm_mmu_pages { |
| 2166 | struct mmu_page_and_offset { |
| 2167 | struct kvm_mmu_page *sp; |
| 2168 | unsigned int idx; |
| 2169 | } page[KVM_PAGE_ARRAY_NR]; |
| 2170 | unsigned int nr; |
| 2171 | }; |
| 2172 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 2173 | static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, |
| 2174 | int idx) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2175 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2176 | int i; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2177 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2178 | if (sp->unsync) |
| 2179 | for (i=0; i < pvec->nr; i++) |
| 2180 | if (pvec->page[i].sp == sp) |
| 2181 | return 0; |
| 2182 | |
| 2183 | pvec->page[pvec->nr].sp = sp; |
| 2184 | pvec->page[pvec->nr].idx = idx; |
| 2185 | pvec->nr++; |
| 2186 | return (pvec->nr == KVM_PAGE_ARRAY_NR); |
| 2187 | } |
| 2188 | |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2189 | static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx) |
| 2190 | { |
| 2191 | --sp->unsync_children; |
| 2192 | WARN_ON((int)sp->unsync_children < 0); |
| 2193 | __clear_bit(idx, sp->unsync_child_bitmap); |
| 2194 | } |
| 2195 | |
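| | /* |
| | * Recursively collect the unsync descendants of @sp into @pvec by following |
| | * unsync_child_bitmap, clearing bits for entries that no longer need |
| | * syncing.  Returns the number of unsync leaf pages found, or -ENOSPC if |
| | * @pvec is full. |
| | */ |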
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2196 | static int __mmu_unsync_walk(struct kvm_mmu_page *sp, |
| 2197 | struct kvm_mmu_pages *pvec) |
| 2198 | { |
| 2199 | int i, ret, nr_unsync_leaf = 0; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2200 | |
Takuya Yoshikawa | 37178b8 | 2011-11-29 14:02:45 +0900 | [diff] [blame] | 2201 | for_each_set_bit(i, sp->unsync_child_bitmap, 512) { |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 2202 | struct kvm_mmu_page *child; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2203 | u64 ent = sp->spt[i]; |
| 2204 | |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2205 | if (!is_shadow_present_pte(ent) || is_large_pte(ent)) { |
| 2206 | clear_unsync_child_bit(sp, i); |
| 2207 | continue; |
| 2208 | } |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2209 | |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 2210 | child = page_header(ent & PT64_BASE_ADDR_MASK); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2211 | |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 2212 | if (child->unsync_children) { |
| 2213 | if (mmu_pages_add(pvec, child, i)) |
| 2214 | return -ENOSPC; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2215 | |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 2216 | ret = __mmu_unsync_walk(child, pvec); |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2217 | if (!ret) { |
| 2218 | clear_unsync_child_bit(sp, i); |
| 2219 | continue; |
| 2220 | } else if (ret > 0) { |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 2221 | nr_unsync_leaf += ret; |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2222 | } else |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 2223 | return ret; |
| 2224 | } else if (child->unsync) { |
| 2225 | nr_unsync_leaf++; |
| 2226 | if (mmu_pages_add(pvec, child, i)) |
| 2227 | return -ENOSPC; |
| 2228 | } else |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2229 | clear_unsync_child_bit(sp, i); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2230 | } |
| 2231 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2232 | return nr_unsync_leaf; |
| 2233 | } |
| 2234 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 2235 | #define INVALID_INDEX (-1) |
| 2236 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2237 | static int mmu_unsync_walk(struct kvm_mmu_page *sp, |
| 2238 | struct kvm_mmu_pages *pvec) |
| 2239 | { |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2240 | pvec->nr = 0; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2241 | if (!sp->unsync_children) |
| 2242 | return 0; |
| 2243 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 2244 | mmu_pages_add(pvec, sp, INVALID_INDEX); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2245 | return __mmu_unsync_walk(sp, pvec); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2246 | } |
| 2247 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2248 | static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 2249 | { |
| 2250 | WARN_ON(!sp->unsync); |
Xiao Guangrong | 5e1b3dd | 2010-04-28 11:55:06 +0800 | [diff] [blame] | 2251 | trace_kvm_mmu_sync_page(sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2252 | sp->unsync = 0; |
| 2253 | --kvm->stat.mmu_unsync; |
| 2254 | } |
| 2255 | |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2256 | static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 2257 | struct list_head *invalid_list); |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2258 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, |
| 2259 | struct list_head *invalid_list); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2260 | |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 2261 | |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2262 | #define for_each_valid_sp(_kvm, _sp, _gfn) \ |
Takuya Yoshikawa | 1044b03 | 2013-03-06 16:05:07 +0900 | [diff] [blame] | 2263 | hlist_for_each_entry(_sp, \ |
| 2264 | &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ |
Sean Christopherson | fac026d | 2019-09-12 19:46:03 -0700 | [diff] [blame] | 2265 | if (is_obsolete_sp((_kvm), (_sp))) { \ |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2266 | } else |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2267 | |
Takuya Yoshikawa | 1044b03 | 2013-03-06 16:05:07 +0900 | [diff] [blame] | 2268 | #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \ |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2269 | for_each_valid_sp(_kvm, _sp, _gfn) \ |
| 2270 | if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2271 | |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 2272 | static inline bool is_ept_sp(struct kvm_mmu_page *sp) |
| 2273 | { |
| 2274 | return sp->role.cr0_wp && sp->role.smap_andnot_wp; |
| 2275 | } |
| 2276 | |
Xiao Guangrong | f918b44 | 2010-06-11 21:30:36 +0800 | [diff] [blame] | 2277 | /* @sp->gfn should be write-protected at the call site */ |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2278 | static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
| 2279 | struct list_head *invalid_list) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2280 | { |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 2281 | if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) || |
| 2282 | vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2283 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2284 | return false; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2285 | } |
| 2286 | |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2287 | return true; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2288 | } |
| 2289 | |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 2290 | static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, |
| 2291 | struct list_head *invalid_list, |
| 2292 | bool remote_flush) |
| 2293 | { |
Sean Christopherson | cfd32ac | 2019-04-12 19:55:41 -0700 | [diff] [blame] | 2294 | if (!remote_flush && list_empty(invalid_list)) |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 2295 | return false; |
| 2296 | |
| 2297 | if (!list_empty(invalid_list)) |
| 2298 | kvm_mmu_commit_zap_page(kvm, invalid_list); |
| 2299 | else |
| 2300 | kvm_flush_remote_tlbs(kvm); |
| 2301 | return true; |
| 2302 | } |
| 2303 | |
Paolo Bonzini | 35a7051 | 2016-02-24 10:03:27 +0100 | [diff] [blame] | 2304 | static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, |
| 2305 | struct list_head *invalid_list, |
| 2306 | bool remote_flush, bool local_flush) |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 2307 | { |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 2308 | if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush)) |
Paolo Bonzini | 35a7051 | 2016-02-24 10:03:27 +0100 | [diff] [blame] | 2309 | return; |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 2310 | |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 2311 | if (local_flush) |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 2312 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 2313 | } |
| 2314 | |
Xiao Guangrong | e37fa78 | 2011-11-30 17:43:24 +0800 | [diff] [blame] | 2315 | #ifdef CONFIG_KVM_MMU_AUDIT |
| 2316 | #include "mmu_audit.c" |
| 2317 | #else |
| 2318 | static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } |
| 2319 | static void mmu_audit_disable(void) { } |
| 2320 | #endif |
| 2321 | |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 2322 | static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 2323 | { |
Sean Christopherson | fac026d | 2019-09-12 19:46:03 -0700 | [diff] [blame] | 2324 | return sp->role.invalid || |
| 2325 | unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 2326 | } |
| 2327 | |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2328 | static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2329 | struct list_head *invalid_list) |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 2330 | { |
Paolo Bonzini | 9a43c5d | 2016-02-24 10:28:01 +0100 | [diff] [blame] | 2331 | kvm_unlink_unsync_page(vcpu->kvm, sp); |
| 2332 | return __kvm_sync_page(vcpu, sp, invalid_list); |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 2333 | } |
| 2334 | |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2335 | /* @gfn should be write-protected at the call site */ |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2336 | static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2337 | struct list_head *invalid_list) |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2338 | { |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2339 | struct kvm_mmu_page *s; |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2340 | bool ret = false; |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2341 | |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 2342 | for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2343 | if (!s->unsync) |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2344 | continue; |
| 2345 | |
| 2346 | WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2347 | ret |= kvm_sync_page(vcpu, s, invalid_list); |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2348 | } |
| 2349 | |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2350 | return ret; |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2351 | } |
| 2352 | |
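| | /* |
| | * The chain of parent pages leading to the shadow page currently being |
| | * visited by an unsync walk.  for_each_sp() keeps it in step with the page |
| | * vector so that mmu_pages_clear_parents() can clear the unsync child bits |
| | * back up the chain once a leaf has been handled. |
| | */ |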
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2353 | struct mmu_page_path { |
Yu Zhang | 2a7266a | 2017-08-24 20:27:54 +0800 | [diff] [blame] | 2354 | struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL]; |
| 2355 | unsigned int idx[PT64_ROOT_MAX_LEVEL]; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2356 | }; |
| 2357 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2358 | #define for_each_sp(pvec, sp, parents, i) \ |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2359 | for (i = mmu_pages_first(&pvec, &parents); \ |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2360 | i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ |
| 2361 | i = mmu_pages_next(&pvec, &parents, i)) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2362 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 2363 | static int mmu_pages_next(struct kvm_mmu_pages *pvec, |
| 2364 | struct mmu_page_path *parents, |
| 2365 | int i) |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2366 | { |
| 2367 | int n; |
| 2368 | |
| 2369 | for (n = i+1; n < pvec->nr; n++) { |
| 2370 | struct kvm_mmu_page *sp = pvec->page[n].sp; |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2371 | unsigned idx = pvec->page[n].idx; |
| 2372 | int level = sp->role.level; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2373 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2374 | parents->idx[level-1] = idx; |
| 2375 | if (level == PT_PAGE_TABLE_LEVEL) |
| 2376 | break; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2377 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2378 | parents->parent[level-2] = sp; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2379 | } |
| 2380 | |
| 2381 | return n; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2382 | } |
| 2383 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2384 | static int mmu_pages_first(struct kvm_mmu_pages *pvec, |
| 2385 | struct mmu_page_path *parents) |
| 2386 | { |
| 2387 | struct kvm_mmu_page *sp; |
| 2388 | int level; |
| 2389 | |
| 2390 | if (pvec->nr == 0) |
| 2391 | return 0; |
| 2392 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 2393 | WARN_ON(pvec->page[0].idx != INVALID_INDEX); |
| 2394 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2395 | sp = pvec->page[0].sp; |
| 2396 | level = sp->role.level; |
| 2397 | WARN_ON(level == PT_PAGE_TABLE_LEVEL); |
| 2398 | |
| 2399 | parents->parent[level-2] = sp; |
| 2400 | |
| 2401 | /* Also set up a sentinel. Further entries in pvec are all |
| 2402 | * children of sp, so this element is never overwritten. |
| 2403 | */ |
| 2404 | parents->parent[level-1] = NULL; |
| 2405 | return mmu_pages_next(pvec, parents, 0); |
| 2406 | } |
| 2407 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 2408 | static void mmu_pages_clear_parents(struct mmu_page_path *parents) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2409 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2410 | struct kvm_mmu_page *sp; |
| 2411 | unsigned int level = 0; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2412 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2413 | do { |
| 2414 | unsigned int idx = parents->idx[level]; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2415 | sp = parents->parent[level]; |
| 2416 | if (!sp) |
| 2417 | return; |
| 2418 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 2419 | WARN_ON(idx == INVALID_INDEX); |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2420 | clear_unsync_child_bit(sp, idx); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2421 | level++; |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2422 | } while (!sp->unsync_children); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2423 | } |
| 2424 | |
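| | /* |
| | * Sync every unsync descendant of @parent: write protect the gfns first |
| | * (flushing TLBs if any page was write protected), then sync each page |
| | * against the guest tables, dropping mmu_lock periodically to avoid soft |
| | * lockups. |
| | */ |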
| 2425 | static void mmu_sync_children(struct kvm_vcpu *vcpu, |
| 2426 | struct kvm_mmu_page *parent) |
| 2427 | { |
| 2428 | int i; |
| 2429 | struct kvm_mmu_page *sp; |
| 2430 | struct mmu_page_path parents; |
| 2431 | struct kvm_mmu_pages pages; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2432 | LIST_HEAD(invalid_list); |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 2433 | bool flush = false; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2434 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2435 | while (mmu_unsync_walk(parent, &pages)) { |
Xiao Guangrong | 2f84569 | 2012-06-20 15:56:53 +0800 | [diff] [blame] | 2436 | bool protected = false; |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 2437 | |
| 2438 | for_each_sp(pages, sp, parents, i) |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 2439 | protected |= rmap_write_protect(vcpu, sp->gfn); |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 2440 | |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 2441 | if (protected) { |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 2442 | kvm_flush_remote_tlbs(vcpu->kvm); |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 2443 | flush = false; |
| 2444 | } |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 2445 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2446 | for_each_sp(pages, sp, parents, i) { |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2447 | flush |= kvm_sync_page(vcpu, sp, &invalid_list); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2448 | mmu_pages_clear_parents(&parents); |
| 2449 | } |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 2450 | if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) { |
| 2451 | kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); |
| 2452 | cond_resched_lock(&vcpu->kvm->mmu_lock); |
| 2453 | flush = false; |
| 2454 | } |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2455 | } |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 2456 | |
| 2457 | kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2458 | } |
| 2459 | |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 2460 | static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) |
| 2461 | { |
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 2462 | atomic_set(&sp->write_flooding_count, 0); |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 2463 | } |
| 2464 | |
| 2465 | static void clear_sp_write_flooding_count(u64 *spte) |
| 2466 | { |
| 2467 | struct kvm_mmu_page *sp = page_header(__pa(spte)); |
| 2468 | |
| 2469 | __clear_sp_write_flooding_count(sp); |
| 2470 | } |
| 2471 | |
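| | /* |
| | * Find the shadow page for (@gfn, role) in the hash table, syncing it first |
| | * if it has gone unsync.  If no usable page exists, allocate a new one, |
| | * hash it, and for indirect pages account the shadowed gfn and write |
| | * protect it when shadowing a last-level page table. |
| | */ |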
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2472 | static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, |
| 2473 | gfn_t gfn, |
| 2474 | gva_t gaddr, |
| 2475 | unsigned level, |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 2476 | int direct, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 2477 | unsigned int access) |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2478 | { |
| 2479 | union kvm_mmu_page_role role; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2480 | unsigned quadrant; |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2481 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2482 | bool need_sync = false; |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2483 | bool flush = false; |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2484 | int collisions = 0; |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2485 | LIST_HEAD(invalid_list); |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2486 | |
Vitaly Kuznetsov | 36d9594d | 2018-10-08 21:28:10 +0200 | [diff] [blame] | 2487 | role = vcpu->arch.mmu->mmu_role.base; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2488 | role.level = level; |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 2489 | role.direct = direct; |
Avi Kivity | 84b0c8c | 2010-03-14 10:16:40 +0200 | [diff] [blame] | 2490 | if (role.direct) |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 2491 | role.gpte_is_8_bytes = true; |
Avi Kivity | 41074d0 | 2007-12-09 17:00:02 +0200 | [diff] [blame] | 2492 | role.access = access; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2493 | if (!vcpu->arch.mmu->direct_map |
| 2494 | && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2495 | quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); |
| 2496 | quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; |
| 2497 | role.quadrant = quadrant; |
| 2498 | } |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2499 | for_each_valid_sp(vcpu->kvm, sp, gfn) { |
| 2500 | if (sp->gfn != gfn) { |
| 2501 | collisions++; |
| 2502 | continue; |
| 2503 | } |
| 2504 | |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2505 | if (!need_sync && sp->unsync) |
| 2506 | need_sync = true; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2507 | |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2508 | if (sp->role.word != role.word) |
| 2509 | continue; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2510 | |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2511 | if (sp->unsync) { |
| 2512 | /* The page is good, but __kvm_sync_page might still end |
| 2513 | * up zapping it. If so, break in order to rebuild it. |
| 2514 | */ |
| 2515 | if (!__kvm_sync_page(vcpu, sp, &invalid_list)) |
| 2516 | break; |
| 2517 | |
| 2518 | WARN_ON(!list_empty(&invalid_list)); |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 2519 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2520 | } |
Xiao Guangrong | e02aa90 | 2010-05-15 18:52:34 +0800 | [diff] [blame] | 2521 | |
Takuya Yoshikawa | 98bba23 | 2015-11-26 21:14:34 +0900 | [diff] [blame] | 2522 | if (sp->unsync_children) |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 2523 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Xiao Guangrong | e02aa90 | 2010-05-15 18:52:34 +0800 | [diff] [blame] | 2524 | |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 2525 | __clear_sp_write_flooding_count(sp); |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2526 | trace_kvm_mmu_get_page(sp, false); |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2527 | goto out; |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2528 | } |
Takuya Yoshikawa | 4700579 | 2015-11-20 17:46:29 +0900 | [diff] [blame] | 2529 | |
Avi Kivity | dfc5aa0 | 2007-12-18 19:47:18 +0200 | [diff] [blame] | 2530 | ++vcpu->kvm->stat.mmu_cache_miss; |
Takuya Yoshikawa | 4700579 | 2015-11-20 17:46:29 +0900 | [diff] [blame] | 2531 | |
| 2532 | sp = kvm_mmu_alloc_page(vcpu, direct); |
| 2533 | |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2534 | sp->gfn = gfn; |
| 2535 | sp->role = role; |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2536 | hlist_add_head(&sp->hash_link, |
| 2537 | &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 2538 | if (!direct) { |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 2539 | /* |
| 2540 | * We should do write protection before syncing pages, |
| 2541 | * otherwise the content of the synced shadow page may |
| 2542 | * be inconsistent with the guest page table. |
| 2543 | */ |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 2544 | account_shadowed(vcpu->kvm, sp); |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 2545 | if (level == PT_PAGE_TABLE_LEVEL && |
| 2546 | rmap_write_protect(vcpu, gfn)) |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 2547 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1); |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 2548 | |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2549 | if (level > PT_PAGE_TABLE_LEVEL && need_sync) |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2550 | flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2551 | } |
Takuya Yoshikawa | 7749266 | 2015-12-18 18:54:49 +0900 | [diff] [blame] | 2552 | clear_page(sp->spt); |
Avi Kivity | f691fe1 | 2009-07-06 15:58:14 +0300 | [diff] [blame] | 2553 | trace_kvm_mmu_get_page(sp, true); |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2554 | |
| 2555 | kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2556 | out: |
| 2557 | if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions) |
| 2558 | vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions; |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2559 | return sp; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2560 | } |
| 2561 | |
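| | /* |
| | * Initialize an iterator that descends the shadow page tables for @addr |
| | * starting from @root, handling the case where the root is the special |
| | * 4-entry pae_root rather than a full page. |
| | */ |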
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2562 | static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator, |
| 2563 | struct kvm_vcpu *vcpu, hpa_t root, |
| 2564 | u64 addr) |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2565 | { |
| 2566 | iterator->addr = addr; |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2567 | iterator->shadow_addr = root; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2568 | iterator->level = vcpu->arch.mmu->shadow_root_level; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2569 | |
Yu Zhang | 2a7266a | 2017-08-24 20:27:54 +0800 | [diff] [blame] | 2570 | if (iterator->level == PT64_ROOT_4LEVEL && |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2571 | vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && |
| 2572 | !vcpu->arch.mmu->direct_map) |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2573 | --iterator->level; |
| 2574 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2575 | if (iterator->level == PT32E_ROOT_LEVEL) { |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2576 | /* |
| 2577 | * prev_root is currently only used for 64-bit hosts. So only |
| 2578 | * the active root_hpa is valid here. |
| 2579 | */ |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2580 | BUG_ON(root != vcpu->arch.mmu->root_hpa); |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2581 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2582 | iterator->shadow_addr |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2583 | = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2584 | iterator->shadow_addr &= PT64_BASE_ADDR_MASK; |
| 2585 | --iterator->level; |
| 2586 | if (!iterator->shadow_addr) |
| 2587 | iterator->level = 0; |
| 2588 | } |
| 2589 | } |
| 2590 | |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2591 | static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, |
| 2592 | struct kvm_vcpu *vcpu, u64 addr) |
| 2593 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2594 | shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa, |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2595 | addr); |
| 2596 | } |
| 2597 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2598 | static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) |
| 2599 | { |
| 2600 | if (iterator->level < PT_PAGE_TABLE_LEVEL) |
| 2601 | return false; |
Marcelo Tosatti | 4d88954 | 2009-06-11 12:07:41 -0300 | [diff] [blame] | 2602 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2603 | iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); |
| 2604 | iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; |
| 2605 | return true; |
| 2606 | } |
| 2607 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2608 | static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, |
| 2609 | u64 spte) |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2610 | { |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2611 | if (is_last_spte(spte, iterator->level)) { |
Xiao Guangrong | 052331b | 2011-07-12 03:21:17 +0800 | [diff] [blame] | 2612 | iterator->level = 0; |
| 2613 | return; |
| 2614 | } |
| 2615 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2616 | iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2617 | --iterator->level; |
| 2618 | } |
| 2619 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2620 | static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) |
| 2621 | { |
David Hildenbrand | bb606a9 | 2017-08-24 20:51:23 +0200 | [diff] [blame] | 2622 | __shadow_walk_next(iterator, *iterator->sptep); |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2623 | } |
| 2624 | |
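| | /* |
| | * Link a child shadow page into the paging hierarchy: build a present, |
| | * writable non-leaf SPTE pointing at the child's page table, record the |
| | * parent SPTE, and propagate any pending unsync state up the tree. |
| | */ |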
Takuya Yoshikawa | 98bba23 | 2015-11-26 21:14:34 +0900 | [diff] [blame] | 2625 | static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, |
| 2626 | struct kvm_mmu_page *sp) |
Avi Kivity | 32ef26a | 2010-07-13 14:27:04 +0300 | [diff] [blame] | 2627 | { |
| 2628 | u64 spte; |
| 2629 | |
Bandan Das | ffb128c | 2016-07-12 18:18:49 -0400 | [diff] [blame] | 2630 | BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK); |
Yang Zhang | 7a1638c | 2013-08-05 11:07:13 +0300 | [diff] [blame] | 2631 | |
Bandan Das | ffb128c | 2016-07-12 18:18:49 -0400 | [diff] [blame] | 2632 | spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK | |
Tom Lendacky | d0ec49d | 2017-07-17 16:10:27 -0500 | [diff] [blame] | 2633 | shadow_user_mask | shadow_x_mask | shadow_me_mask; |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 2634 | |
| 2635 | if (sp_ad_disabled(sp)) |
Paolo Bonzini | 6eeb4ef | 2019-09-24 12:43:08 +0200 | [diff] [blame] | 2636 | spte |= SPTE_AD_DISABLED_MASK; |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 2637 | else |
| 2638 | spte |= shadow_accessed_mask; |
Xiao Guangrong | 24db273 | 2013-02-05 15:28:02 +0800 | [diff] [blame] | 2639 | |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 2640 | mmu_spte_set(sptep, spte); |
Takuya Yoshikawa | 98bba23 | 2015-11-26 21:14:34 +0900 | [diff] [blame] | 2641 | |
| 2642 | mmu_page_add_parent_pte(vcpu, sp, sptep); |
| 2643 | |
| 2644 | if (sp->unsync_children || sp->unsync) |
| 2645 | mark_unsync(sptep); |
Avi Kivity | 32ef26a | 2010-07-13 14:27:04 +0300 | [diff] [blame] | 2646 | } |
| 2647 | |
Avi Kivity | a357bd2 | 2010-07-13 14:27:07 +0300 | [diff] [blame] | 2648 | static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
| 2649 | unsigned direct_access) |
| 2650 | { |
| 2651 | if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { |
| 2652 | struct kvm_mmu_page *child; |
| 2653 | |
| 2654 | /* |
| 2655 | * For the direct sp, if the guest pte's dirty bit |
| 2656 | * changed from clean to dirty, it would corrupt the |
| 2657 | * sp's access by allowing writes through the read-only |
| 2658 | * sp, so update the spte at this point to get a new |
| 2659 | * sp with the correct access. |
| 2660 | */ |
| 2661 | child = page_header(*sptep & PT64_BASE_ADDR_MASK); |
| 2662 | if (child->role.access == direct_access) |
| 2663 | return; |
| 2664 | |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2665 | drop_parent_pte(child, sptep); |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 2666 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1); |
Avi Kivity | a357bd2 | 2010-07-13 14:27:07 +0300 | [diff] [blame] | 2667 | } |
| 2668 | } |
| 2669 | |
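| | /* |
| | * Zap a single SPTE belonging to @sp. Returns true if a present SPTE |
| | * (leaf or non-leaf) was removed, false if only an MMIO SPTE was |
| | * cleared or nothing needed to be done. |
| | */ |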
Xiao Guangrong | 505aef8 | 2011-09-22 16:56:06 +0800 | [diff] [blame] | 2670 | static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2671 | u64 *spte) |
| 2672 | { |
| 2673 | u64 pte; |
| 2674 | struct kvm_mmu_page *child; |
| 2675 | |
| 2676 | pte = *spte; |
| 2677 | if (is_shadow_present_pte(pte)) { |
Xiao Guangrong | 505aef8 | 2011-09-22 16:56:06 +0800 | [diff] [blame] | 2678 | if (is_last_spte(pte, sp->role.level)) { |
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 2679 | drop_spte(kvm, spte); |
Xiao Guangrong | 505aef8 | 2011-09-22 16:56:06 +0800 | [diff] [blame] | 2680 | if (is_large_pte(pte)) |
| 2681 | --kvm->stat.lpages; |
| 2682 | } else { |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2683 | child = page_header(pte & PT64_BASE_ADDR_MASK); |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2684 | drop_parent_pte(child, spte); |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2685 | } |
Xiao Guangrong | 505aef8 | 2011-09-22 16:56:06 +0800 | [diff] [blame] | 2686 | return true; |
| 2687 | } |
| 2688 | |
| 2689 | if (is_mmio_spte(pte)) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 2690 | mmu_spte_clear_no_track(spte); |
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 2691 | |
Xiao Guangrong | 505aef8 | 2011-09-22 16:56:06 +0800 | [diff] [blame] | 2692 | return false; |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2693 | } |
| 2694 | |
Avi Kivity | 90cb052 | 2007-07-17 13:04:56 +0300 | [diff] [blame] | 2695 | static void kvm_mmu_page_unlink_children(struct kvm *kvm, |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2696 | struct kvm_mmu_page *sp) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2697 | { |
Avi Kivity | 697fe2e | 2007-01-05 16:36:46 -0800 | [diff] [blame] | 2698 | unsigned i; |
Avi Kivity | 697fe2e | 2007-01-05 16:36:46 -0800 | [diff] [blame] | 2699 | |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2700 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) |
| 2701 | mmu_page_zap_pte(kvm, sp, sp->spt + i); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2702 | } |
| 2703 | |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2704 | static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2705 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 2706 | u64 *sptep; |
| 2707 | struct rmap_iterator iter; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2708 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 2709 | while ((sptep = rmap_get_first(&sp->parent_ptes, &iter))) |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 2710 | drop_parent_pte(sp, sptep); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2711 | } |
| 2712 | |
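| | /* |
| | * Zap every unsync child reachable from @parent, queueing them on |
| | * @invalid_list. Returns the number of shadow pages zapped. |
| | */ |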
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2713 | static int mmu_zap_unsync_children(struct kvm *kvm, |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2714 | struct kvm_mmu_page *parent, |
| 2715 | struct list_head *invalid_list) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2716 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2717 | int i, zapped = 0; |
| 2718 | struct mmu_page_path parents; |
| 2719 | struct kvm_mmu_pages pages; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2720 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2721 | if (parent->role.level == PT_PAGE_TABLE_LEVEL) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2722 | return 0; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2723 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2724 | while (mmu_unsync_walk(parent, &pages)) { |
| 2725 | struct kvm_mmu_page *sp; |
| 2726 | |
| 2727 | for_each_sp(pages, sp, parents, i) { |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2728 | kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2729 | mmu_pages_clear_parents(&parents); |
Xiao Guangrong | 77662e0 | 2010-04-16 16:34:42 +0800 | [diff] [blame] | 2730 | zapped++; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2731 | } |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2732 | } |
| 2733 | |
| 2734 | return zapped; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2735 | } |
| 2736 | |
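| | /* |
| | * Unlink a shadow page from the paging structures and, unless it is |
| | * still in use as a root, queue it on @invalid_list for later freeing. |
| | * *@nr_zapped is set to the number of pages zapped, including unsync |
| | * children. Returns true if zapping children made active_mmu_pages |
| | * unstable, i.e. the caller must restart its list walk. |
| | */ |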
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2737 | static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, |
| 2738 | struct kvm_mmu_page *sp, |
| 2739 | struct list_head *invalid_list, |
| 2740 | int *nr_zapped) |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2741 | { |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2742 | bool list_unstable; |
Avi Kivity | f691fe1 | 2009-07-06 15:58:14 +0300 | [diff] [blame] | 2743 | |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2744 | trace_kvm_mmu_prepare_zap_page(sp); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2745 | ++kvm->stat.mmu_shadow_zapped; |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2746 | *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2747 | kvm_mmu_page_unlink_children(kvm, sp); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2748 | kvm_mmu_unlink_parents(kvm, sp); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 2749 | |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2750 | /* Zapping children means active_mmu_pages has become unstable. */ |
| 2751 | list_unstable = *nr_zapped; |
| 2752 | |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 2753 | if (!sp->role.invalid && !sp->role.direct) |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 2754 | unaccount_shadowed(kvm, sp); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 2755 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2756 | if (sp->unsync) |
| 2757 | kvm_unlink_unsync_page(kvm, sp); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2758 | if (!sp->root_count) { |
Gui Jianfeng | 54a4f02 | 2010-05-05 09:03:49 +0800 | [diff] [blame] | 2759 | /* Count self */ |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2760 | (*nr_zapped)++; |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2761 | list_move(&sp->link, invalid_list); |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame] | 2762 | kvm_mod_used_mmu_pages(kvm, -1); |
Marcelo Tosatti | 2e53d63 | 2008-02-20 14:47:24 -0500 | [diff] [blame] | 2763 | } else { |
Avi Kivity | 5b5c6a5 | 2008-07-11 18:07:26 +0300 | [diff] [blame] | 2764 | list_move(&sp->link, &kvm->arch.active_mmu_pages); |
Gleb Natapov | 05988d7 | 2013-05-31 08:36:30 +0800 | [diff] [blame] | 2765 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 2766 | /* |
| 2767 | * Obsolete pages cannot be used on any vCPUs, see the comment |
| 2768 | * in kvm_mmu_zap_all_fast(). Note, is_obsolete_sp() also |
| 2769 | * treats invalid shadow pages as being obsolete. |
| 2770 | */ |
| 2771 | if (!is_obsolete_sp(kvm, sp)) |
Gleb Natapov | 05988d7 | 2013-05-31 08:36:30 +0800 | [diff] [blame] | 2772 | kvm_reload_remote_mmus(kvm); |
Marcelo Tosatti | 2e53d63 | 2008-02-20 14:47:24 -0500 | [diff] [blame] | 2773 | } |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2774 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 2775 | if (sp->lpage_disallowed) |
| 2776 | unaccount_huge_nx_page(kvm, sp); |
| 2777 | |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2778 | sp->role.invalid = 1; |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2779 | return list_unstable; |
| 2780 | } |
| 2781 | |
| 2782 | static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 2783 | struct list_head *invalid_list) |
| 2784 | { |
| 2785 | int nr_zapped; |
| 2786 | |
| 2787 | __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped); |
| 2788 | return nr_zapped; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2789 | } |
| 2790 | |
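| | /* |
| | * Second half of zapping: once the TLB flush below has made the pages |
| | * unreachable from any vCPU, free everything on @invalid_list. |
| | */ |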
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2791 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, |
| 2792 | struct list_head *invalid_list) |
| 2793 | { |
Takuya Yoshikawa | 945315b | 2013-03-06 16:05:52 +0900 | [diff] [blame] | 2794 | struct kvm_mmu_page *sp, *nsp; |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2795 | |
| 2796 | if (list_empty(invalid_list)) |
| 2797 | return; |
| 2798 | |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 2799 | /* |
Lan Tianyu | 9753f52 | 2016-03-13 11:10:24 +0800 | [diff] [blame] | 2800 | * We need to make sure everyone sees our modifications to |
| 2801 | * the page tables and sees changes to vcpu->mode here. The barrier |
| 2802 | * in kvm_flush_remote_tlbs() achieves this. This pairs |
| 2803 | * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end. |
| 2804 | * |
| 2805 | * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit |
| 2806 | * guest mode and/or lockless shadow page table walks. |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 2807 | */ |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2808 | kvm_flush_remote_tlbs(kvm); |
| 2809 | |
Takuya Yoshikawa | 945315b | 2013-03-06 16:05:52 +0900 | [diff] [blame] | 2810 | list_for_each_entry_safe(sp, nsp, invalid_list, link) { |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2811 | WARN_ON(!sp->role.invalid || sp->root_count); |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame] | 2812 | kvm_mmu_free_page(sp); |
Takuya Yoshikawa | 945315b | 2013-03-06 16:05:52 +0900 | [diff] [blame] | 2813 | } |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2814 | } |
| 2815 | |
Takuya Yoshikawa | 5da5960 | 2013-03-06 16:06:58 +0900 | [diff] [blame] | 2816 | static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, |
| 2817 | struct list_head *invalid_list) |
| 2818 | { |
| 2819 | struct kvm_mmu_page *sp; |
| 2820 | |
| 2821 | if (list_empty(&kvm->arch.active_mmu_pages)) |
| 2822 | return false; |
| 2823 | |
Geliang Tang | d74c0e6 | 2016-01-01 19:47:14 +0800 | [diff] [blame] | 2824 | sp = list_last_entry(&kvm->arch.active_mmu_pages, |
| 2825 | struct kvm_mmu_page, link); |
Wanpeng Li | 42bcbeb | 2017-08-10 06:55:51 -0700 | [diff] [blame] | 2826 | return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); |
Takuya Yoshikawa | 5da5960 | 2013-03-06 16:06:58 +0900 | [diff] [blame] | 2827 | } |
| 2828 | |
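| | /* |
| | * Ensure a minimum number of shadow pages is available, recycling the |
| | * oldest active pages if necessary. Returns 0 on success or -ENOSPC if |
| | * no pages could be freed up. |
| | */ |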
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2829 | static int make_mmu_pages_available(struct kvm_vcpu *vcpu) |
| 2830 | { |
| 2831 | LIST_HEAD(invalid_list); |
| 2832 | |
| 2833 | if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES)) |
| 2834 | return 0; |
| 2835 | |
| 2836 | while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) { |
| 2837 | if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list)) |
| 2838 | break; |
| 2839 | |
| 2840 | ++vcpu->kvm->stat.mmu_recycled; |
| 2841 | } |
| 2842 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
| 2843 | |
| 2844 | if (!kvm_mmu_available_pages(vcpu->kvm)) |
| 2845 | return -ENOSPC; |
| 2846 | return 0; |
| 2847 | } |
| 2848 | |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2849 | /* |
| 2850 | * Changing the number of mmu pages allocated to the vm. |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2851 | * Note: if goal_nr_mmu_pages is too small, you will get a deadlock. |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2852 | */ |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 2853 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2854 | { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2855 | LIST_HEAD(invalid_list); |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2856 | |
Takuya Yoshikawa | b34cb59 | 2013-01-08 19:46:07 +0900 | [diff] [blame] | 2857 | spin_lock(&kvm->mmu_lock); |
| 2858 | |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2859 | if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { |
Takuya Yoshikawa | 5da5960 | 2013-03-06 16:06:58 +0900 | [diff] [blame] | 2860 | /* Need to free some mmu pages to achieve the goal. */ |
| 2861 | while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) |
| 2862 | if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list)) |
| 2863 | break; |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2864 | |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame] | 2865 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2866 | goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2867 | } |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2868 | |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2869 | kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; |
Takuya Yoshikawa | b34cb59 | 2013-01-08 19:46:07 +0900 | [diff] [blame] | 2870 | |
| 2871 | spin_unlock(&kvm->mmu_lock); |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2872 | } |
| 2873 | |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 2874 | int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2875 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2876 | struct kvm_mmu_page *sp; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2877 | LIST_HEAD(invalid_list); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2878 | int r; |
| 2879 | |
Xiao Guangrong | 9ad17b10 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 2880 | pgprintk("%s: looking for gfn %llx\n", __func__, gfn); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2881 | r = 0; |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 2882 | spin_lock(&kvm->mmu_lock); |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 2883 | for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { |
Xiao Guangrong | 9ad17b10 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 2884 | pgprintk("%s: gfn %llx role %x\n", __func__, gfn, |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2885 | sp->role.word); |
| 2886 | r = 1; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 2887 | kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2888 | } |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2889 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 2890 | spin_unlock(&kvm->mmu_lock); |
| 2891 | |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2892 | return r; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2893 | } |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 2894 | EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page); |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2895 | |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2896 | static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2897 | { |
Xiao Guangrong | 5e1b3dd | 2010-04-28 11:55:06 +0800 | [diff] [blame] | 2898 | trace_kvm_mmu_unsync_page(sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2899 | ++vcpu->kvm->stat.mmu_unsync; |
| 2900 | sp->unsync = 1; |
Marcelo Tosatti | 6cffe8c | 2008-12-01 22:32:04 -0200 | [diff] [blame] | 2901 | |
Xiao Guangrong | 6b18493 | 2010-04-16 21:29:17 +0800 | [diff] [blame] | 2902 | kvm_mmu_mark_parents_unsync(sp); |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2903 | } |
| 2904 | |
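| | /* |
| | * Decide whether @gfn must remain write-protected. Returns true if the |
| | * gfn is write-tracked or may not be unsynced; otherwise marks the |
| | * shadow pages for the gfn as unsync and returns false, allowing a |
| | * writable SPTE to be created. |
| | */ |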
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2905 | static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2906 | bool can_unsync) |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2907 | { |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2908 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2909 | |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2910 | if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) |
| 2911 | return true; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2912 | |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2913 | for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { |
Xiao Guangrong | 36a2e67 | 2010-06-30 16:02:02 +0800 | [diff] [blame] | 2914 | if (!can_unsync) |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2915 | return true; |
Xiao Guangrong | 36a2e67 | 2010-06-30 16:02:02 +0800 | [diff] [blame] | 2916 | |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2917 | if (sp->unsync) |
| 2918 | continue; |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2919 | |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2920 | WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL); |
| 2921 | kvm_unsync_page(vcpu, sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2922 | } |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2923 | |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 2924 | /* |
| 2925 | * We need to ensure that the marking of unsync pages is visible |
| 2926 | * before the SPTE is updated to allow writes because |
| 2927 | * kvm_mmu_sync_roots() checks the unsync flags without holding |
| 2928 | * the MMU lock and so can race with this. If the SPTE was updated |
| 2929 | * before the page had been marked as unsync-ed, something like the |
| 2930 | * following could happen: |
| 2931 | * |
| 2932 | * CPU 1 CPU 2 |
| 2933 | * --------------------------------------------------------------------- |
| 2934 | * 1.2 Host updates SPTE |
| 2935 | * to be writable |
| 2936 | * 2.1 Guest writes a GPTE for GVA X. |
| 2937 | * (GPTE being in the guest page table shadowed |
| 2938 | * by the SP from CPU 1.) |
| 2939 | * This reads SPTE during the page table walk. |
| 2940 | * Since SPTE.W is read as 1, there is no |
| 2941 | * fault. |
| 2942 | * |
| 2943 | * 2.2 Guest issues TLB flush. |
| 2944 | * That causes a VM Exit. |
| 2945 | * |
| 2946 | * 2.3 kvm_mmu_sync_pages() reads sp->unsync. |
| 2947 | * Since it is false, it just returns. |
| 2948 | * |
| 2949 | * 2.4 Guest accesses GVA X. |
| 2950 | * Since the mapping in the SP was not updated, |
| 2951 | * the old mapping for GVA X is incorrectly |
| 2952 | * used. |
| 2953 | * 1.1 Host marks SP |
| 2954 | * as unsync |
| 2955 | * (sp->unsync = true) |
| 2956 | * |
| 2957 | * The write barrier below ensures that 1.1 happens before 1.2 and thus |
| 2958 | * the situation in 2.4 does not arise. The implicit barrier in 2.2 |
| 2959 | * pairs with this write barrier. |
| 2960 | */ |
| 2961 | smp_wmb(); |
| 2962 | |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2963 | return false; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2964 | } |
| 2965 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2966 | static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) |
Paolo Bonzini | d1fe921 | 2015-07-07 15:03:18 +0200 | [diff] [blame] | 2967 | { |
| 2968 | if (pfn_valid(pfn)) |
Haozhong Zhang | aa2e063 | 2017-12-20 15:29:29 +0800 | [diff] [blame] | 2969 | return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) && |
| 2970 | /* |
| 2971 | * Some reserved pages, such as those from NVDIMM |
| 2972 | * DAX devices, are not for MMIO, and can be mapped |
| 2973 | * with cached memory type for better performance. |
| 2974 | * However, the above check wrongly treats those pages |
| 2975 | * as MMIO, and results in KVM mapping them with UC |
| 2976 | * memory type, which would hurt performance. |
| 2977 | * Therefore, we check the host memory type in addition |
| 2978 | * and only treat UC/UC-/WC pages as MMIO. |
| 2979 | */ |
| 2980 | (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn)); |
Paolo Bonzini | d1fe921 | 2015-07-07 15:03:18 +0200 | [diff] [blame] | 2981 | |
KarimAllah Ahmed | 0c55671 | 2019-01-31 21:24:44 +0100 | [diff] [blame] | 2982 | return !e820__mapped_raw_any(pfn_to_hpa(pfn), |
| 2983 | pfn_to_hpa(pfn + 1) - 1, |
| 2984 | E820_TYPE_RAM); |
Paolo Bonzini | d1fe921 | 2015-07-07 15:03:18 +0200 | [diff] [blame] | 2985 | } |
| 2986 | |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 2987 | /* Bits which may be returned by set_spte() */ |
| 2988 | #define SET_SPTE_WRITE_PROTECTED_PT BIT(0) |
| 2989 | #define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1) |
| 2990 | |
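| | /* |
| | * Compute and install a leaf SPTE for @gfn/@pfn with the requested |
| | * access. Returns a mask of the SET_SPTE_* bits above, telling the |
| | * caller whether the page had to be write-protected and whether a |
| | * remote TLB flush is needed. |
| | */ |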
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2991 | static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 2992 | unsigned int pte_access, int level, |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2993 | gfn_t gfn, kvm_pfn_t pfn, bool speculative, |
Lai Jiangshan | 9bdbba1 | 2010-11-19 17:03:22 +0800 | [diff] [blame] | 2994 | bool can_unsync, bool host_writable) |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 2995 | { |
Bandan Das | ffb128c | 2016-07-12 18:18:49 -0400 | [diff] [blame] | 2996 | u64 spte = 0; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2997 | int ret = 0; |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 2998 | struct kvm_mmu_page *sp; |
Sheng Yang | 64d4d52 | 2008-10-09 16:01:57 +0800 | [diff] [blame] | 2999 | |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 3000 | if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access)) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3001 | return 0; |
| 3002 | |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3003 | sp = page_header(__pa(sptep)); |
| 3004 | if (sp_ad_disabled(sp)) |
Paolo Bonzini | 6eeb4ef | 2019-09-24 12:43:08 +0200 | [diff] [blame] | 3005 | spte |= SPTE_AD_DISABLED_MASK; |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 3006 | else if (kvm_vcpu_ad_need_write_protect(vcpu)) |
| 3007 | spte |= SPTE_AD_WRPROT_ONLY_MASK; |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3008 | |
Bandan Das | d95c556 | 2016-07-12 18:18:51 -0400 | [diff] [blame] | 3009 | /* |
| 3010 | * For the EPT case, shadow_present_mask is 0 if hardware |
| 3011 | * supports exec-only page table entries. In that case, |
| 3012 | * ACC_USER_MASK and shadow_user_mask are used to represent |
| 3013 | * read access. See FNAME(gpte_access) in paging_tmpl.h. |
| 3014 | */ |
Bandan Das | ffb128c | 2016-07-12 18:18:49 -0400 | [diff] [blame] | 3015 | spte |= shadow_present_mask; |
Avi Kivity | 947da53 | 2008-03-18 11:05:52 +0200 | [diff] [blame] | 3016 | if (!speculative) |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3017 | spte |= spte_shadow_accessed_mask(spte); |
Xiao Guangrong | 640d9b0 | 2011-07-12 03:24:39 +0800 | [diff] [blame] | 3018 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 3019 | if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) && |
| 3020 | is_nx_huge_page_enabled()) { |
| 3021 | pte_access &= ~ACC_EXEC_MASK; |
| 3022 | } |
| 3023 | |
Sheng Yang | 7b52345 | 2008-04-25 21:13:50 +0800 | [diff] [blame] | 3024 | if (pte_access & ACC_EXEC_MASK) |
| 3025 | spte |= shadow_x_mask; |
| 3026 | else |
| 3027 | spte |= shadow_nx_mask; |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 3028 | |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3029 | if (pte_access & ACC_USER_MASK) |
Sheng Yang | 7b52345 | 2008-04-25 21:13:50 +0800 | [diff] [blame] | 3030 | spte |= shadow_user_mask; |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 3031 | |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 3032 | if (level > PT_PAGE_TABLE_LEVEL) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 3033 | spte |= PT_PAGE_SIZE_MASK; |
Avi Kivity | b0bc3ee | 2010-09-13 16:45:28 +0200 | [diff] [blame] | 3034 | if (tdp_enabled) |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 3035 | spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn, |
Paolo Bonzini | d1fe921 | 2015-07-07 15:03:18 +0200 | [diff] [blame] | 3036 | kvm_is_mmio_pfn(pfn)); |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3037 | |
Lai Jiangshan | 9bdbba1 | 2010-11-19 17:03:22 +0800 | [diff] [blame] | 3038 | if (host_writable) |
Izik Eidus | 1403283 | 2009-09-23 21:47:17 +0300 | [diff] [blame] | 3039 | spte |= SPTE_HOST_WRITEABLE; |
Xiao Guangrong | f8e453b | 2010-12-23 16:09:29 +0800 | [diff] [blame] | 3040 | else |
| 3041 | pte_access &= ~ACC_WRITE_MASK; |
Izik Eidus | 1403283 | 2009-09-23 21:47:17 +0300 | [diff] [blame] | 3042 | |
Tom Lendacky | daaf216 | 2018-03-08 17:17:31 -0600 | [diff] [blame] | 3043 | if (!kvm_is_mmio_pfn(pfn)) |
| 3044 | spte |= shadow_me_mask; |
| 3045 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 3046 | spte |= (u64)pfn << PAGE_SHIFT; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3047 | |
Xiao Guangrong | c2288505 | 2013-01-08 14:36:04 +0800 | [diff] [blame] | 3048 | if (pte_access & ACC_WRITE_MASK) { |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 3049 | spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3050 | |
Marcelo Tosatti | ecc5589 | 2008-11-25 15:58:07 +0100 | [diff] [blame] | 3051 | /* |
| 3052 | * Optimization: for pte sync, if spte was writable the hash |
| 3053 | * lookup is unnecessary (and expensive). Write protection |
| 3054 | * is the responsibility of mmu_get_page / kvm_sync_page. |
| 3055 | * The same reasoning can be applied to dirty page accounting. |
| 3056 | */ |
Takuya Yoshikawa | 8dae444 | 2010-01-18 18:45:10 +0900 | [diff] [blame] | 3057 | if (!can_unsync && is_writable_pte(*sptep)) |
Marcelo Tosatti | ecc5589 | 2008-11-25 15:58:07 +0100 | [diff] [blame] | 3058 | goto set_pte; |
| 3059 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 3060 | if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { |
Xiao Guangrong | 9ad17b10 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 3061 | pgprintk("%s: found shadow page for %llx, marking ro\n", |
Harvey Harrison | b8688d5 | 2008-03-03 12:59:56 -0800 | [diff] [blame] | 3062 | __func__, gfn); |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 3063 | ret |= SET_SPTE_WRITE_PROTECTED_PT; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3064 | pte_access &= ~ACC_WRITE_MASK; |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 3065 | spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3066 | } |
| 3067 | } |
| 3068 | |
Kai Huang | 9b51a63 | 2015-01-28 10:54:25 +0800 | [diff] [blame] | 3069 | if (pte_access & ACC_WRITE_MASK) { |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 3070 | kvm_vcpu_mark_page_dirty(vcpu, gfn); |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3071 | spte |= spte_shadow_dirty_mask(spte); |
Kai Huang | 9b51a63 | 2015-01-28 10:54:25 +0800 | [diff] [blame] | 3072 | } |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3073 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3074 | if (speculative) |
| 3075 | spte = mark_spte_for_access_track(spte); |
| 3076 | |
Marcelo Tosatti | 38187c8 | 2008-09-23 13:18:32 -0300 | [diff] [blame] | 3077 | set_pte: |
Xiao Guangrong | 6e7d035 | 2012-06-20 15:58:33 +0800 | [diff] [blame] | 3078 | if (mmu_spte_update(sptep, spte)) |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 3079 | ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3080 | return ret; |
| 3081 | } |
| 3082 | |
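| | /* |
| | * Wrapper around set_spte() for the page fault and prefetch paths: |
| | * handles replacing an existing mapping, issues the TLB flushes |
| | * requested by set_spte(), updates rmap and statistics, and returns a |
| | * RET_PF_* value. |
| | */ |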
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 3083 | static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
| 3084 | unsigned int pte_access, int write_fault, int level, |
| 3085 | gfn_t gfn, kvm_pfn_t pfn, bool speculative, |
| 3086 | bool host_writable) |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3087 | { |
| 3088 | int was_rmapped = 0; |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 3089 | int rmap_count; |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 3090 | int set_spte_ret; |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3091 | int ret = RET_PF_RETRY; |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 3092 | bool flush = false; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3093 | |
Xiao Guangrong | f761620 | 2013-02-05 15:27:27 +0800 | [diff] [blame] | 3094 | pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, |
| 3095 | *sptep, write_fault, gfn); |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3096 | |
Takuya Yoshikawa | afd28fe | 2015-11-20 17:44:55 +0900 | [diff] [blame] | 3097 | if (is_shadow_present_pte(*sptep)) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3098 | /* |
| 3099 | * If we overwrite a PTE page pointer with a 2MB PMD, unlink |
| 3100 | * the parent of the now unreachable PTE. |
| 3101 | */ |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 3102 | if (level > PT_PAGE_TABLE_LEVEL && |
| 3103 | !is_large_pte(*sptep)) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3104 | struct kvm_mmu_page *child; |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 3105 | u64 pte = *sptep; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3106 | |
| 3107 | child = page_header(pte & PT64_BASE_ADDR_MASK); |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 3108 | drop_parent_pte(child, sptep); |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 3109 | flush = true; |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 3110 | } else if (pfn != spte_to_pfn(*sptep)) { |
Xiao Guangrong | 9ad17b10 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 3111 | pgprintk("hfn old %llx new %llx\n", |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 3112 | spte_to_pfn(*sptep), pfn); |
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 3113 | drop_spte(vcpu->kvm, sptep); |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 3114 | flush = true; |
Joerg Roedel | 6bed6b9 | 2009-02-18 14:08:59 +0100 | [diff] [blame] | 3115 | } else |
| 3116 | was_rmapped = 1; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3117 | } |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 3118 | |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 3119 | set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn, |
| 3120 | speculative, true, host_writable); |
| 3121 | if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3122 | if (write_fault) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3123 | ret = RET_PF_EMULATE; |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 3124 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Marcelo Tosatti | a378b4e | 2008-09-23 13:18:31 -0300 | [diff] [blame] | 3125 | } |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 3126 | |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 3127 | if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush) |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 3128 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, |
| 3129 | KVM_PAGES_PER_HPAGE(level)); |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3130 | |
Takuya Yoshikawa | 029499b | 2015-11-20 17:44:05 +0900 | [diff] [blame] | 3131 | if (unlikely(is_mmio_spte(*sptep))) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3132 | ret = RET_PF_EMULATE; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3133 | |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 3134 | pgprintk("%s: setting spte %llx\n", __func__, *sptep); |
Paolo Bonzini | 335e192 | 2019-07-01 06:22:57 -0400 | [diff] [blame] | 3135 | trace_kvm_mmu_set_spte(level, gfn, sptep); |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 3136 | if (!was_rmapped && is_large_pte(*sptep)) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 3137 | ++vcpu->kvm->stat.lpages; |
| 3138 | |
Xiao Guangrong | ffb61bb | 2011-07-12 03:22:01 +0800 | [diff] [blame] | 3139 | if (is_shadow_present_pte(*sptep)) { |
Xiao Guangrong | ffb61bb | 2011-07-12 03:22:01 +0800 | [diff] [blame] | 3140 | if (!was_rmapped) { |
| 3141 | rmap_count = rmap_add(vcpu, sptep, gfn); |
| 3142 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) |
| 3143 | rmap_recycle(vcpu, sptep, gfn); |
| 3144 | } |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3145 | } |
Xiao Guangrong | cb9aaa3 | 2012-08-03 15:42:10 +0800 | [diff] [blame] | 3146 | |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3147 | return ret; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3148 | } |
| 3149 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 3150 | static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3151 | bool no_dirty_log) |
| 3152 | { |
| 3153 | struct kvm_memory_slot *slot; |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3154 | |
Xiao Guangrong | 5d163b1 | 2011-03-09 15:43:00 +0800 | [diff] [blame] | 3155 | slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); |
Xiao Guangrong | 903816f | 2012-07-17 21:54:11 +0800 | [diff] [blame] | 3156 | if (!slot) |
Xiao Guangrong | 6c8ee57 | 2012-08-03 15:37:54 +0800 | [diff] [blame] | 3157 | return KVM_PFN_ERR_FAULT; |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3158 | |
Xiao Guangrong | 037d92d | 2012-08-21 10:59:12 +0800 | [diff] [blame] | 3159 | return gfn_to_pfn_memslot_atomic(slot, gfn); |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3160 | } |
| 3161 | |
| 3162 | static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, |
| 3163 | struct kvm_mmu_page *sp, |
| 3164 | u64 *start, u64 *end) |
| 3165 | { |
| 3166 | struct page *pages[PTE_PREFETCH_NUM]; |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 3167 | struct kvm_memory_slot *slot; |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 3168 | unsigned int access = sp->role.access; |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3169 | int i, ret; |
| 3170 | gfn_t gfn; |
| 3171 | |
| 3172 | gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 3173 | slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK); |
| 3174 | if (!slot) |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3175 | return -1; |
| 3176 | |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 3177 | ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start); |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3178 | if (ret <= 0) |
| 3179 | return -1; |
| 3180 | |
Junaid Shahid | 43fdcda | 2019-01-03 16:22:21 -0800 | [diff] [blame] | 3181 | for (i = 0; i < ret; i++, gfn++, start++) { |
Takuya Yoshikawa | 029499b | 2015-11-20 17:44:05 +0900 | [diff] [blame] | 3182 | mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn, |
| 3183 | page_to_pfn(pages[i]), true, true); |
Junaid Shahid | 43fdcda | 2019-01-03 16:22:21 -0800 | [diff] [blame] | 3184 | put_page(pages[i]); |
| 3185 | } |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3186 | |
| 3187 | return 0; |
| 3188 | } |
| 3189 | |
| 3190 | static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, |
| 3191 | struct kvm_mmu_page *sp, u64 *sptep) |
| 3192 | { |
| 3193 | u64 *spte, *start = NULL; |
| 3194 | int i; |
| 3195 | |
| 3196 | WARN_ON(!sp->role.direct); |
| 3197 | |
| 3198 | i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); |
| 3199 | spte = sp->spt + i; |
| 3200 | |
| 3201 | for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { |
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 3202 | if (is_shadow_present_pte(*spte) || spte == sptep) { |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3203 | if (!start) |
| 3204 | continue; |
| 3205 | if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) |
| 3206 | break; |
| 3207 | start = NULL; |
| 3208 | } else if (!start) |
| 3209 | start = spte; |
| 3210 | } |
| 3211 | } |
| 3212 | |
| 3213 | static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) |
| 3214 | { |
| 3215 | struct kvm_mmu_page *sp; |
| 3216 | |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3217 | sp = page_header(__pa(sptep)); |
| 3218 | |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3219 | /* |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3220 | * Without accessed bits, there's no way to distinguish between |
| 3221 | * actually accessed translations and prefetched ones, so disable pte |
| 3222 | * prefetch if accessed bits aren't available. |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3223 | */ |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3224 | if (sp_ad_disabled(sp)) |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3225 | return; |
| 3226 | |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3227 | if (sp->role.level > PT_PAGE_TABLE_LEVEL) |
| 3228 | return; |
| 3229 | |
| 3230 | __direct_pte_prefetch(vcpu, sp, sptep); |
| 3231 | } |
| 3232 | |
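| | /* |
| | * Return the level at which the host maps the page backing @pfn by |
| | * walking the host page tables for the gfn's userspace address, |
| | * falling back to PT_PAGE_TABLE_LEVEL for non-compound pages or if the |
| | * lookup fails. |
| | */ |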
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3233 | static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3234 | kvm_pfn_t pfn, struct kvm_memory_slot *slot) |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3235 | { |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3236 | unsigned long hva; |
| 3237 | pte_t *pte; |
| 3238 | int level; |
| 3239 | |
| 3240 | BUILD_BUG_ON(PT_PAGE_TABLE_LEVEL != (int)PG_LEVEL_4K || |
| 3241 | PT_DIRECTORY_LEVEL != (int)PG_LEVEL_2M || |
| 3242 | PT_PDPE_LEVEL != (int)PG_LEVEL_1G); |
| 3243 | |
Sean Christopherson | e851265 | 2020-01-08 12:24:48 -0800 | [diff] [blame] | 3244 | if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn)) |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3245 | return PT_PAGE_TABLE_LEVEL; |
| 3246 | |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3247 | /* |
| 3248 | * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() |
| 3249 | * is not solely for performance, it's also necessary to avoid the |
| 3250 | * "writable" check in __gfn_to_hva_many(), which will always fail on |
| 3251 | * read-only memslots due to gfn_to_hva() assuming writes. Earlier |
| 3252 | * page fault steps have already verified the guest isn't writing a |
| 3253 | * read-only memslot. |
| 3254 | */ |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3255 | hva = __gfn_to_hva_memslot(slot, gfn); |
| 3256 | |
| 3257 | pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level); |
| 3258 | if (unlikely(!pte)) |
| 3259 | return PT_PAGE_TABLE_LEVEL; |
| 3260 | |
| 3261 | return level; |
| 3262 | } |
| 3263 | |
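| | /* |
| | * Pick the largest page size usable to map @gfn, bounded by @max_level, |
| | * the memslot's lpage_info and the host mapping level, and align *@pfnp |
| | * down to the chosen huge page boundary. |
| | */ |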
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3264 | static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 3265 | int max_level, kvm_pfn_t *pfnp) |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 3266 | { |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3267 | struct kvm_memory_slot *slot; |
Sean Christopherson | 2c0629f | 2020-01-08 12:24:47 -0800 | [diff] [blame] | 3268 | struct kvm_lpage_info *linfo; |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 3269 | kvm_pfn_t pfn = *pfnp; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3270 | kvm_pfn_t mask; |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3271 | int level; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3272 | |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3273 | if (unlikely(max_level == PT_PAGE_TABLE_LEVEL)) |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3274 | return PT_PAGE_TABLE_LEVEL; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3275 | |
Sean Christopherson | e851265 | 2020-01-08 12:24:48 -0800 | [diff] [blame] | 3276 | if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn)) |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3277 | return PT_PAGE_TABLE_LEVEL; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3278 | |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3279 | slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true); |
| 3280 | if (!slot) |
| 3281 | return PT_PAGE_TABLE_LEVEL; |
| 3282 | |
Sean Christopherson | 703c335 | 2020-03-02 15:57:03 -0800 | [diff] [blame] | 3283 | max_level = min(max_level, max_page_level); |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3284 | for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) { |
Sean Christopherson | 2c0629f | 2020-01-08 12:24:47 -0800 | [diff] [blame] | 3285 | linfo = lpage_info_slot(gfn, slot, max_level); |
| 3286 | if (!linfo->disallow_lpage) |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3287 | break; |
| 3288 | } |
| 3289 | |
| 3290 | if (max_level == PT_PAGE_TABLE_LEVEL) |
| 3291 | return PT_PAGE_TABLE_LEVEL; |
| 3292 | |
| 3293 | level = host_pfn_mapping_level(vcpu, gfn, pfn, slot); |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3294 | if (level == PT_PAGE_TABLE_LEVEL) |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3295 | return level; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3296 | |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3297 | level = min(level, max_level); |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 3298 | |
| 3299 | /* |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3300 | * mmu_notifier_retry() was successful and mmu_lock is held, so |
| 3301 | * the pmd can't be split from under us. |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 3302 | */ |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3303 | mask = KVM_PAGES_PER_HPAGE(level) - 1; |
| 3304 | VM_BUG_ON((gfn & mask) != (pfn & mask)); |
| 3305 | *pfnp = pfn & ~mask; |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3306 | |
| 3307 | return level; |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 3308 | } |
| 3309 | |
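| | /* |
| | * If NX huge pages are enabled and a non-large SPTE already exists at |
| | * the target level, refuse to map a huge page over it: drop *@levelp by |
| | * one and fold the corresponding address bits of @gfn into *@pfnp. |
| | */ |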
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 3310 | static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it, |
| 3311 | gfn_t gfn, kvm_pfn_t *pfnp, int *levelp) |
| 3312 | { |
| 3313 | int level = *levelp; |
| 3314 | u64 spte = *it.sptep; |
| 3315 | |
| 3316 | if (it.level == level && level > PT_PAGE_TABLE_LEVEL && |
| 3317 | is_nx_huge_page_enabled() && |
| 3318 | is_shadow_present_pte(spte) && |
| 3319 | !is_large_pte(spte)) { |
| 3320 | /* |
| 3321 | * A small SPTE exists for this pfn, but FNAME(fetch) |
| 3322 | * and __direct_map would like to create a large PTE |
| 3323 | * instead: just force them to go down another level, |
| 3324 | * patching the next 9 bits of the address back into |
| 3325 | * pfn for them. |
| 3326 | */ |
| 3327 | u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1); |
| 3328 | *pfnp |= gfn & page_mask; |
| 3329 | (*levelp)--; |
| 3330 | } |
| 3331 | } |
| 3332 | |
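| | /* |
| | * Fault-in path for direct (TDP / non-paging) MMUs: walk the shadow |
| | * page tables for @gpa, allocating intermediate shadow pages as needed, |
| | * then install the leaf SPTE for @pfn at the chosen level. |
| | */ |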
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3333 | static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write, |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3334 | int map_writable, int max_level, kvm_pfn_t pfn, |
| 3335 | bool prefault, bool account_disallowed_nx_lpage) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3336 | { |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3337 | struct kvm_shadow_walk_iterator it; |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 3338 | struct kvm_mmu_page *sp; |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3339 | int level, ret; |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3340 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 3341 | gfn_t base_gfn = gfn; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3342 | |
Sean Christopherson | 0c7a98e | 2019-12-06 15:57:28 -0800 | [diff] [blame] | 3343 | if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa))) |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3344 | return RET_PF_RETRY; |
Marcelo Tosatti | 989c6b3 | 2013-12-19 15:28:51 -0200 | [diff] [blame] | 3345 | |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3346 | level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn); |
Sean Christopherson | 4cd071d | 2019-12-06 15:57:26 -0800 | [diff] [blame] | 3347 | |
Paolo Bonzini | 335e192 | 2019-07-01 06:22:57 -0400 | [diff] [blame] | 3348 | trace_kvm_mmu_spte_requested(gpa, level, pfn); |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3349 | for_each_shadow_entry(vcpu, gpa, it) { |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 3350 | /* |
| 3351 | * We cannot overwrite existing page tables with an NX |
| 3352 | * large page, as the leaf could be executable. |
| 3353 | */ |
| 3354 | disallowed_hugepage_adjust(it, gfn, &pfn, &level); |
| 3355 | |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3356 | base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); |
| 3357 | if (it.level == level) |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 3358 | break; |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 3359 | |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3360 | drop_large_spte(vcpu, it.sptep); |
| 3361 | if (!is_shadow_present_pte(*it.sptep)) { |
| 3362 | sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr, |
| 3363 | it.level - 1, true, ACC_ALL); |
Lai Jiangshan | c9fa0b3 | 2010-05-26 16:48:25 +0800 | [diff] [blame] | 3364 | |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3365 | link_shadow_page(vcpu, it.sptep, sp); |
Sean Christopherson | 2cb70fd | 2019-12-06 15:57:23 -0800 | [diff] [blame] | 3366 | if (account_disallowed_nx_lpage) |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 3367 | account_huge_nx_page(vcpu->kvm, sp); |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 3368 | } |
| 3369 | } |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3370 | |
| 3371 | ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, |
| 3372 | write, level, base_gfn, pfn, prefault, |
| 3373 | map_writable); |
| 3374 | direct_pte_prefetch(vcpu, it.sptep); |
| 3375 | ++vcpu->stat.pf_fixed; |
| 3376 | return ret; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3377 | } |
| 3378 | |
Huang Ying | 77db5cb | 2010-10-08 16:24:15 +0800 | [diff] [blame] | 3379 | static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 3380 | { |
Eric W. Biederman | 585a8b9 | 2018-04-16 14:23:27 -0500 | [diff] [blame] | 3381 | send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk); |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 3382 | } |
| 3383 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 3384 | static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 3385 | { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 3386 | /* |
| 3387 | * Do not cache the mmio info caused by writing the readonly gfn |
| 3388 | * into the spte, otherwise a read access on the readonly gfn can |
| 3389 | * also cause an mmio page fault and be treated as mmio access. |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 3390 | */ |
| 3391 | if (pfn == KVM_PFN_ERR_RO_FAULT) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3392 | return RET_PF_EMULATE; |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 3393 | |
Xiao Guangrong | e6c1502 | 2012-08-03 15:38:36 +0800 | [diff] [blame] | 3394 | if (pfn == KVM_PFN_ERR_HWPOISON) { |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 3395 | kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3396 | return RET_PF_RETRY; |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3397 | } |
Gleb Natapov | edba23e | 2010-07-07 20:16:45 +0300 | [diff] [blame] | 3398 | |
Sean Christopherson | 2c151b2 | 2018-03-29 14:48:30 -0700 | [diff] [blame] | 3399 | return -EFAULT; |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 3400 | } |
| 3401 | |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3402 | static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 3403 | kvm_pfn_t pfn, unsigned int access, |
| 3404 | int *ret_val) |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3405 | { |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3406 | /* The pfn is invalid, report the error! */ |
Xiao Guangrong | 81c52c5 | 2012-10-16 20:10:59 +0800 | [diff] [blame] | 3407 | if (unlikely(is_error_pfn(pfn))) { |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3408 | *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); |
Paolo Bonzini | 798e88b | 2016-02-23 15:28:51 +0100 | [diff] [blame] | 3409 | return true; |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3410 | } |
| 3411 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3412 | if (unlikely(is_noslot_pfn(pfn))) |
Sean Christopherson | 4af7715 | 2019-08-01 13:35:22 -0700 | [diff] [blame] | 3413 | vcpu_cache_mmio_info(vcpu, gva, gfn, |
| 3414 | access & shadow_mmio_access_mask); |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3415 | |
Paolo Bonzini | 798e88b | 2016-02-23 15:28:51 +0100 | [diff] [blame] | 3416 | return false; |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3417 | } |
| 3418 | |
Xiao Guangrong | e5552fd | 2013-07-30 21:01:59 +0800 | [diff] [blame] | 3419 | static bool page_fault_can_be_fast(u32 error_code) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3420 | { |
| 3421 | /* |
Xiao Guangrong | 1c118b8 | 2013-07-18 12:52:37 +0800 | [diff] [blame] |  3422 | 	 * Do not fix an mmio spte with an invalid generation number; it
|  3423 | 	 * needs to be updated by the slow page fault path.
| 3424 | */ |
| 3425 | if (unlikely(error_code & PFERR_RSVD_MASK)) |
| 3426 | return false; |
| 3427 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3428 | /* See if the page fault is due to an NX violation */ |
| 3429 | if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)) |
| 3430 | == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)))) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3431 | return false; |
| 3432 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3433 | /* |
| 3434 | * #PF can be fast if: |
| 3435 | * 1. The shadow page table entry is not present, which could mean that |
| 3436 | * the fault is potentially caused by access tracking (if enabled). |
|  3437 |  * 2. The shadow page table entry is present and the fault
|  3438 |  *    is caused by write-protection; in that case we just need to change
|  3439 |  *    the W bit of the spte, which can be done outside of mmu-lock.
| 3440 | * |
| 3441 | * However, if access tracking is disabled we know that a non-present |
| 3442 | * page must be a genuine page fault where we have to create a new SPTE. |
| 3443 | * So, if access tracking is disabled, we return true only for write |
| 3444 | * accesses to a present page. |
| 3445 | */ |
| 3446 | |
| 3447 | return shadow_acc_track_mask != 0 || |
| 3448 | ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)) |
| 3449 | == (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3450 | } |
| 3451 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3452 | /* |
| 3453 | * Returns true if the SPTE was fixed successfully. Otherwise, |
| 3454 | * someone else modified the SPTE from its original value. |
| 3455 | */ |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3456 | static bool |
Xiao Guangrong | 92a476c | 2014-04-17 17:06:13 +0800 | [diff] [blame] | 3457 | fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3458 | u64 *sptep, u64 old_spte, u64 new_spte) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3459 | { |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3460 | gfn_t gfn; |
| 3461 | |
| 3462 | WARN_ON(!sp->role.direct); |
| 3463 | |
Kai Huang | 9b51a63 | 2015-01-28 10:54:25 +0800 | [diff] [blame] | 3464 | /* |
| 3465 | * Theoretically we could also set dirty bit (and flush TLB) here in |
| 3466 | * order to eliminate unnecessary PML logging. See comments in |
| 3467 | * set_spte. But fast_page_fault is very unlikely to happen with PML |
|  3468 | 	 * enabled, so we do not do this. This might result in the same GPA
|  3469 | 	 * being logged in the PML buffer again when the write really happens,
|  3470 | 	 * and eventually in mark_page_dirty being called twice. But that is
|  3471 | 	 * harmless. This also avoids the TLB flush needed after setting the
|  3472 | 	 * dirty bit, so non-PML cases won't be impacted.
| 3473 | * |
| 3474 | * Compare with set_spte where instead shadow_dirty_mask is set. |
| 3475 | */ |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3476 | if (cmpxchg64(sptep, old_spte, new_spte) != old_spte) |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3477 | return false; |
| 3478 | |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3479 | if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) { |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3480 | /* |
|  3481 | 		 * The gfn of a direct spte is stable since it is
|  3482 | 		 * calculated from sp->gfn.
| 3483 | */ |
| 3484 | gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); |
| 3485 | kvm_vcpu_mark_page_dirty(vcpu, gfn); |
| 3486 | } |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3487 | |
| 3488 | return true; |
| 3489 | } |
| 3490 | |
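|       | /*
|       |  * Check whether the access described by the fault error code is already
|       |  * allowed by the given spte, i.e. whether the fault is spurious.
|       |  */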
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3491 | static bool is_access_allowed(u32 fault_err_code, u64 spte) |
| 3492 | { |
| 3493 | if (fault_err_code & PFERR_FETCH_MASK) |
| 3494 | return is_executable_pte(spte); |
| 3495 | |
| 3496 | if (fault_err_code & PFERR_WRITE_MASK) |
| 3497 | return is_writable_pte(spte); |
| 3498 | |
| 3499 | /* Fault was on Read access */ |
| 3500 | return spte & PT_PRESENT_MASK; |
| 3501 | } |
| 3502 | |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3503 | /* |
| 3504 | * Return value: |
|  3505 |  * - true: let the vcpu access the same address again.
|  3506 |  * - false: let the real page fault path fix it.
| 3507 | */ |
Sean Christopherson | f9fa250 | 2020-01-08 12:24:42 -0800 | [diff] [blame] | 3508 | static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3509 | u32 error_code) |
| 3510 | { |
| 3511 | struct kvm_shadow_walk_iterator iterator; |
Xiao Guangrong | 92a476c | 2014-04-17 17:06:13 +0800 | [diff] [blame] | 3512 | struct kvm_mmu_page *sp; |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3513 | bool fault_handled = false; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3514 | u64 spte = 0ull; |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3515 | uint retry_count = 0; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3516 | |
Xiao Guangrong | e5552fd | 2013-07-30 21:01:59 +0800 | [diff] [blame] | 3517 | if (!page_fault_can_be_fast(error_code)) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3518 | return false; |
| 3519 | |
| 3520 | walk_shadow_page_lockless_begin(vcpu); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3521 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3522 | do { |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3523 | u64 new_spte; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3524 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3525 | for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte) |
Sean Christopherson | f9fa250 | 2020-01-08 12:24:42 -0800 | [diff] [blame] | 3526 | if (!is_shadow_present_pte(spte)) |
Junaid Shahid | d162f30 | 2016-12-21 20:29:30 -0800 | [diff] [blame] | 3527 | break; |
| 3528 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3529 | sp = page_header(__pa(iterator.sptep)); |
| 3530 | if (!is_last_spte(spte, sp->role.level)) |
| 3531 | break; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3532 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3533 | /* |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3534 | * Check whether the memory access that caused the fault would |
| 3535 | * still cause it if it were to be performed right now. If not, |
|  3536 | 		 * then this is a spurious fault caused by a lazily flushed TLB,
| 3537 | * or some other CPU has already fixed the PTE after the |
| 3538 | * current CPU took the fault. |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3539 | * |
| 3540 | * Need not check the access of upper level table entries since |
| 3541 | * they are always ACC_ALL. |
| 3542 | */ |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3543 | if (is_access_allowed(error_code, spte)) { |
| 3544 | fault_handled = true; |
| 3545 | break; |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3546 | } |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3547 | |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3548 | new_spte = spte; |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3549 | |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3550 | if (is_access_track_spte(spte)) |
| 3551 | new_spte = restore_acc_track_spte(new_spte); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3552 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3553 | /* |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3554 | * Currently, to simplify the code, write-protection can |
| 3555 | * be removed in the fast path only if the SPTE was |
| 3556 | * write-protected for dirty-logging or access tracking. |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3557 | */ |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3558 | if ((error_code & PFERR_WRITE_MASK) && |
Miaohe Lin | e630269 | 2020-02-15 10:44:22 +0800 | [diff] [blame] | 3559 | spte_can_locklessly_be_made_writable(spte)) { |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3560 | new_spte |= PT_WRITABLE_MASK; |
| 3561 | |
| 3562 | /* |
| 3563 | * Do not fix write-permission on the large spte. Since |
| 3564 | * we only dirty the first page into the dirty-bitmap in |
|  3565 | 			 * fast_pf_fix_direct_spte(), the other pages would be
|  3566 | 			 * missed if the slot has dirty logging enabled.
| 3567 | * |
| 3568 | * Instead, we let the slow page fault path create a |
| 3569 | * normal spte to fix the access. |
| 3570 | * |
| 3571 | * See the comments in kvm_arch_commit_memory_region(). |
| 3572 | */ |
| 3573 | if (sp->role.level > PT_PAGE_TABLE_LEVEL) |
| 3574 | break; |
| 3575 | } |
| 3576 | |
| 3577 | /* Verify that the fault can be handled in the fast path */ |
| 3578 | if (new_spte == spte || |
| 3579 | !is_access_allowed(error_code, new_spte)) |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3580 | break; |
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 3581 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3582 | /* |
| 3583 | * Currently, fast page fault only works for direct mapping |
|  3584 | 		 * since the gfn is not stable for indirect shadow pages. See
Christoph Hellwig | 2f5947d | 2019-07-24 09:24:49 +0200 | [diff] [blame] |  3585 | 		 * Documentation/virt/kvm/locking.txt for more details.
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3586 | */ |
| 3587 | fault_handled = fast_pf_fix_direct_spte(vcpu, sp, |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3588 | iterator.sptep, spte, |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3589 | new_spte); |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3590 | if (fault_handled) |
| 3591 | break; |
| 3592 | |
| 3593 | if (++retry_count > 4) { |
| 3594 | printk_once(KERN_WARNING |
| 3595 | "kvm: Fast #PF retrying more than 4 times.\n"); |
| 3596 | break; |
| 3597 | } |
| 3598 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3599 | } while (true); |
| 3600 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3601 | trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep, |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3602 | spte, fault_handled); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3603 | walk_shadow_page_lockless_end(vcpu); |
| 3604 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3605 | return fault_handled; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3606 | } |
| 3607 | |
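|       | /*
|       |  * Drop the root's reference on its shadow page and, if that was the last
|       |  * reference on an invalid page, queue the page for zapping on
|       |  * @invalid_list.  The root itself is then marked INVALID_PAGE.
|       |  */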
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3608 | static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, |
| 3609 | struct list_head *invalid_list) |
| 3610 | { |
| 3611 | struct kvm_mmu_page *sp; |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 3612 | |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3613 | if (!VALID_PAGE(*root_hpa)) |
| 3614 | return; |
| 3615 | |
| 3616 | sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK); |
| 3617 | --sp->root_count; |
| 3618 | if (!sp->root_count && sp->role.invalid) |
| 3619 | kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); |
| 3620 | |
| 3621 | *root_hpa = INVALID_PAGE; |
| 3622 | } |
| 3623 | |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3624 | /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */ |
Vitaly Kuznetsov | 6a82cd1 | 2018-10-08 21:28:07 +0200 | [diff] [blame] | 3625 | void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
| 3626 | ulong roots_to_free) |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3627 | { |
| 3628 | int i; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3629 | LIST_HEAD(invalid_list); |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3630 | bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3631 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3632 | BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3633 | |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3634 | /* Before acquiring the MMU lock, see if we need to do any real work. */ |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3635 | if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) { |
| 3636 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 3637 | if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) && |
| 3638 | VALID_PAGE(mmu->prev_roots[i].hpa)) |
| 3639 | break; |
| 3640 | |
| 3641 | if (i == KVM_MMU_NUM_PREV_ROOTS) |
| 3642 | return; |
| 3643 | } |
Gleb Natapov | 35af577 | 2013-05-16 11:55:51 +0300 | [diff] [blame] | 3644 | |
Gleb Natapov | 35af577 | 2013-05-16 11:55:51 +0300 | [diff] [blame] | 3645 | spin_lock(&vcpu->kvm->mmu_lock); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3646 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3647 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 3648 | if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) |
| 3649 | mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa, |
| 3650 | &invalid_list); |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 3651 | |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3652 | if (free_active_root) { |
| 3653 | if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && |
| 3654 | (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { |
| 3655 | mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, |
| 3656 | &invalid_list); |
| 3657 | } else { |
| 3658 | for (i = 0; i < 4; ++i) |
| 3659 | if (mmu->pae_root[i] != 0) |
| 3660 | mmu_free_root_page(vcpu->kvm, |
| 3661 | &mmu->pae_root[i], |
| 3662 | &invalid_list); |
| 3663 | mmu->root_hpa = INVALID_PAGE; |
| 3664 | } |
Vitaly Kuznetsov | ad7dc69 | 2019-02-22 17:45:01 +0100 | [diff] [blame] | 3665 | mmu->root_cr3 = 0; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3666 | } |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3667 | |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3668 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 3669 | spin_unlock(&vcpu->kvm->mmu_lock); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3670 | } |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3671 | EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3672 | |
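|       | /*
|       |  * Returns 1 and requests a triple fault if @root_gfn is not backed by a
|       |  * visible memslot, 0 otherwise.
|       |  */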
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3673 | static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) |
| 3674 | { |
| 3675 | int ret = 0; |
| 3676 | |
| 3677 | if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { |
Avi Kivity | a8eeb04 | 2010-05-10 12:34:53 +0300 | [diff] [blame] | 3678 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3679 | ret = 1; |
| 3680 | } |
| 3681 | |
| 3682 | return ret; |
| 3683 | } |
| 3684 | |
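|       | /*
|       |  * Allocate the root(s) for a direct-mapped MMU: a single root page for
|       |  * 4-level (or higher) paging, or four PAE roots when the shadow root
|       |  * level is PT32E_ROOT_LEVEL.
|       |  */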
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3685 | static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) |
| 3686 | { |
| 3687 | struct kvm_mmu_page *sp; |
Avi Kivity | 7ebaf15 | 2010-10-03 18:51:39 +0200 | [diff] [blame] | 3688 | unsigned i; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3689 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3690 | if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3691 | spin_lock(&vcpu->kvm->mmu_lock); |
Wanpeng Li | 26eeb53 | 2017-08-10 16:28:02 -0700 | [diff] [blame] |  3692 | 		if (make_mmu_pages_available(vcpu) < 0) {
| 3693 | spin_unlock(&vcpu->kvm->mmu_lock); |
Wanpeng Li | ed52870 | 2017-12-04 22:21:30 -0800 | [diff] [blame] | 3694 | return -ENOSPC; |
Wanpeng Li | 26eeb53 | 2017-08-10 16:28:02 -0700 | [diff] [blame] | 3695 | } |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 3696 | sp = kvm_mmu_get_page(vcpu, 0, 0, |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3697 | vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL); |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3698 | ++sp->root_count; |
| 3699 | spin_unlock(&vcpu->kvm->mmu_lock); |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3700 | vcpu->arch.mmu->root_hpa = __pa(sp->spt); |
| 3701 | } else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) { |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3702 | for (i = 0; i < 4; ++i) { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3703 | hpa_t root = vcpu->arch.mmu->pae_root[i]; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3704 | |
Paolo Bonzini | fa4a2c0 | 2013-10-02 16:56:16 +0200 | [diff] [blame] | 3705 | MMU_WARN_ON(VALID_PAGE(root)); |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3706 | spin_lock(&vcpu->kvm->mmu_lock); |
Wanpeng Li | 26eeb53 | 2017-08-10 16:28:02 -0700 | [diff] [blame] | 3707 | if (make_mmu_pages_available(vcpu) < 0) { |
| 3708 | spin_unlock(&vcpu->kvm->mmu_lock); |
Wanpeng Li | ed52870 | 2017-12-04 22:21:30 -0800 | [diff] [blame] | 3709 | return -ENOSPC; |
Wanpeng Li | 26eeb53 | 2017-08-10 16:28:02 -0700 | [diff] [blame] | 3710 | } |
Avi Kivity | 649497d | 2010-12-28 12:09:07 +0200 | [diff] [blame] | 3711 | sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), |
Takuya Yoshikawa | bb11c6c | 2015-11-26 21:16:35 +0900 | [diff] [blame] | 3712 | i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL); |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3713 | root = __pa(sp->spt); |
| 3714 | ++sp->root_count; |
| 3715 | spin_unlock(&vcpu->kvm->mmu_lock); |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3716 | vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3717 | } |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3718 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3719 | } else |
| 3720 | BUG(); |
Sean Christopherson | 3651c7f | 2020-02-28 14:52:39 -0800 | [diff] [blame] | 3721 | |
| 3722 | /* root_cr3 is ignored for direct MMUs. */ |
| 3723 | vcpu->arch.mmu->root_cr3 = 0; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3724 | |
| 3725 | return 0; |
| 3726 | } |
| 3727 | |
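|       | /*
|       |  * Allocate the root(s) used to shadow the guest's own page tables: the
|       |  * guest root is shadowed directly for 4-level (or higher) guests, while
|       |  * 32-bit and PAE guests get four PAE roots, optionally parented by an
|       |  * on-demand lm_root page when the host shadow level is 4-level.
|       |  */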
| 3728 | static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3729 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3730 | struct kvm_mmu_page *sp; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3731 | u64 pdptr, pm_mask; |
Vitaly Kuznetsov | ad7dc69 | 2019-02-22 17:45:01 +0100 | [diff] [blame] | 3732 | gfn_t root_gfn, root_cr3; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3733 | int i; |
Avi Kivity | 3bb65a2 | 2007-01-05 16:36:51 -0800 | [diff] [blame] | 3734 | |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 3735 | root_cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu); |
Vitaly Kuznetsov | ad7dc69 | 2019-02-22 17:45:01 +0100 | [diff] [blame] | 3736 | root_gfn = root_cr3 >> PAGE_SHIFT; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3737 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3738 | if (mmu_check_root(vcpu, root_gfn)) |
| 3739 | return 1; |
| 3740 | |
| 3741 | /* |
| 3742 | * Do we shadow a long mode page table? If so we need to |
|  3743 | 	 * write-protect the guest's page table root.
| 3744 | */ |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3745 | if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { |
| 3746 | hpa_t root = vcpu->arch.mmu->root_hpa; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3747 | |
Paolo Bonzini | fa4a2c0 | 2013-10-02 16:56:16 +0200 | [diff] [blame] | 3748 | MMU_WARN_ON(VALID_PAGE(root)); |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3749 | |
Avi Kivity | 8facbbf | 2010-05-04 12:58:32 +0300 | [diff] [blame] | 3750 | spin_lock(&vcpu->kvm->mmu_lock); |
Wanpeng Li | 26eeb53 | 2017-08-10 16:28:02 -0700 | [diff] [blame] | 3751 | if (make_mmu_pages_available(vcpu) < 0) { |
| 3752 | spin_unlock(&vcpu->kvm->mmu_lock); |
Wanpeng Li | ed52870 | 2017-12-04 22:21:30 -0800 | [diff] [blame] | 3753 | return -ENOSPC; |
Wanpeng Li | 26eeb53 | 2017-08-10 16:28:02 -0700 | [diff] [blame] | 3754 | } |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 3755 | sp = kvm_mmu_get_page(vcpu, root_gfn, 0, |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3756 | vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3757 | root = __pa(sp->spt); |
| 3758 | ++sp->root_count; |
Avi Kivity | 8facbbf | 2010-05-04 12:58:32 +0300 | [diff] [blame] | 3759 | spin_unlock(&vcpu->kvm->mmu_lock); |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3760 | vcpu->arch.mmu->root_hpa = root; |
Vitaly Kuznetsov | ad7dc69 | 2019-02-22 17:45:01 +0100 | [diff] [blame] | 3761 | goto set_root_cr3; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3762 | } |
Joerg Roedel | f87f928 | 2010-09-02 17:29:45 +0200 | [diff] [blame] | 3763 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3764 | /* |
| 3765 | * We shadow a 32 bit page table. This may be a legacy 2-level |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3766 | * or a PAE 3-level page table. In either case we need to be aware that |
| 3767 | * the shadow page table may be a PAE or a long mode page table. |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3768 | */ |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3769 | pm_mask = PT_PRESENT_MASK; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3770 | if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3771 | pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; |
| 3772 | |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3773 | for (i = 0; i < 4; ++i) { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3774 | hpa_t root = vcpu->arch.mmu->pae_root[i]; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3775 | |
Paolo Bonzini | fa4a2c0 | 2013-10-02 16:56:16 +0200 | [diff] [blame] | 3776 | MMU_WARN_ON(VALID_PAGE(root)); |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3777 | if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) { |
| 3778 | pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i); |
Bandan Das | 812f30b | 2016-07-12 18:18:50 -0400 | [diff] [blame] | 3779 | if (!(pdptr & PT_PRESENT_MASK)) { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3780 | vcpu->arch.mmu->pae_root[i] = 0; |
Avi Kivity | 417726a | 2007-04-12 17:35:58 +0300 | [diff] [blame] | 3781 | continue; |
| 3782 | } |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 3783 | root_gfn = pdptr >> PAGE_SHIFT; |
Joerg Roedel | f87f928 | 2010-09-02 17:29:45 +0200 | [diff] [blame] | 3784 | if (mmu_check_root(vcpu, root_gfn)) |
| 3785 | return 1; |
Eric Northup | 5a7388c | 2010-04-26 17:00:05 -0700 | [diff] [blame] | 3786 | } |
Avi Kivity | 8facbbf | 2010-05-04 12:58:32 +0300 | [diff] [blame] | 3787 | spin_lock(&vcpu->kvm->mmu_lock); |
Wanpeng Li | 26eeb53 | 2017-08-10 16:28:02 -0700 | [diff] [blame] | 3788 | if (make_mmu_pages_available(vcpu) < 0) { |
| 3789 | spin_unlock(&vcpu->kvm->mmu_lock); |
Wanpeng Li | ed52870 | 2017-12-04 22:21:30 -0800 | [diff] [blame] | 3790 | return -ENOSPC; |
Wanpeng Li | 26eeb53 | 2017-08-10 16:28:02 -0700 | [diff] [blame] | 3791 | } |
Takuya Yoshikawa | bb11c6c | 2015-11-26 21:16:35 +0900 | [diff] [blame] | 3792 | sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, |
| 3793 | 0, ACC_ALL); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3794 | root = __pa(sp->spt); |
| 3795 | ++sp->root_count; |
Avi Kivity | 8facbbf | 2010-05-04 12:58:32 +0300 | [diff] [blame] | 3796 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 3797 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3798 | vcpu->arch.mmu->pae_root[i] = root | pm_mask; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3799 | } |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3800 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3801 | |
| 3802 | /* |
| 3803 | * If we shadow a 32 bit page table with a long mode page |
| 3804 | * table we enter this path. |
| 3805 | */ |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3806 | if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) { |
| 3807 | if (vcpu->arch.mmu->lm_root == NULL) { |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3808 | /* |
| 3809 | * The additional page necessary for this is only |
| 3810 | * allocated on demand. |
| 3811 | */ |
| 3812 | |
| 3813 | u64 *lm_root; |
| 3814 | |
Ben Gardon | 254272c | 2019-02-11 11:02:50 -0800 | [diff] [blame] |  3815 | 			lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3816 | if (lm_root == NULL) |
| 3817 | return 1; |
| 3818 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3819 | lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3820 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3821 | vcpu->arch.mmu->lm_root = lm_root; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3822 | } |
| 3823 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3824 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3825 | } |
| 3826 | |
Vitaly Kuznetsov | ad7dc69 | 2019-02-22 17:45:01 +0100 | [diff] [blame] | 3827 | set_root_cr3: |
| 3828 | vcpu->arch.mmu->root_cr3 = root_cr3; |
| 3829 | |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3830 | return 0; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3831 | } |
| 3832 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3833 | static int mmu_alloc_roots(struct kvm_vcpu *vcpu) |
| 3834 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3835 | if (vcpu->arch.mmu->direct_map) |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3836 | return mmu_alloc_direct_roots(vcpu); |
| 3837 | else |
| 3838 | return mmu_alloc_shadow_roots(vcpu); |
| 3839 | } |
| 3840 | |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3841 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3842 | { |
| 3843 | int i; |
| 3844 | struct kvm_mmu_page *sp; |
| 3845 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3846 | if (vcpu->arch.mmu->direct_map) |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3847 | return; |
| 3848 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3849 | if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3850 | return; |
Xiao Guangrong | 6903074 | 2010-09-27 18:09:29 +0800 | [diff] [blame] | 3851 | |
David Matlack | 56f17dd | 2014-08-18 15:46:07 -0700 | [diff] [blame] | 3852 | vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3853 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3854 | if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { |
| 3855 | hpa_t root = vcpu->arch.mmu->root_hpa; |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3856 | sp = page_header(root); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3857 | |
| 3858 | /* |
| 3859 | * Even if another CPU was marking the SP as unsync-ed |
| 3860 | * simultaneously, any guest page table changes are not |
| 3861 | * guaranteed to be visible anyway until this VCPU issues a TLB |
| 3862 | * flush strictly after those changes are made. We only need to |
| 3863 | * ensure that the other CPU sets these flags before any actual |
| 3864 | * changes to the page tables are made. The comments in |
| 3865 | * mmu_need_write_protect() describe what could go wrong if this |
| 3866 | * requirement isn't satisfied. |
| 3867 | */ |
| 3868 | if (!smp_load_acquire(&sp->unsync) && |
| 3869 | !smp_load_acquire(&sp->unsync_children)) |
| 3870 | return; |
| 3871 | |
| 3872 | spin_lock(&vcpu->kvm->mmu_lock); |
| 3873 | kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); |
| 3874 | |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3875 | mmu_sync_children(vcpu, sp); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3876 | |
Xiao Guangrong | 0375f7f | 2011-11-28 20:41:00 +0800 | [diff] [blame] | 3877 | kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3878 | spin_unlock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3879 | return; |
| 3880 | } |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3881 | |
| 3882 | spin_lock(&vcpu->kvm->mmu_lock); |
| 3883 | kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); |
| 3884 | |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3885 | for (i = 0; i < 4; ++i) { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3886 | hpa_t root = vcpu->arch.mmu->pae_root[i]; |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3887 | |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3888 | if (root && VALID_PAGE(root)) { |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3889 | root &= PT64_BASE_ADDR_MASK; |
| 3890 | sp = page_header(root); |
| 3891 | mmu_sync_children(vcpu, sp); |
| 3892 | } |
| 3893 | } |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3894 | |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3895 | kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3896 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 3897 | } |
Nadav Har'El | bfd0a56 | 2013-08-05 11:07:17 +0300 | [diff] [blame] | 3898 | EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3899 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3900 | static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr, |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3901 | u32 access, struct x86_exception *exception) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3902 | { |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3903 | if (exception) |
| 3904 | exception->error_code = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3905 | return vaddr; |
| 3906 | } |
| 3907 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3908 | static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr, |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3909 | u32 access, |
| 3910 | struct x86_exception *exception) |
Joerg Roedel | 6539e73 | 2010-09-10 17:30:50 +0200 | [diff] [blame] | 3911 | { |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3912 | if (exception) |
| 3913 | exception->error_code = 0; |
Paolo Bonzini | 54987b7 | 2014-09-02 13:23:06 +0200 | [diff] [blame] | 3914 | return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); |
Joerg Roedel | 6539e73 | 2010-09-10 17:30:50 +0200 | [diff] [blame] | 3915 | } |
| 3916 | |
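|       | /*
|       |  * Reserved-bit check: the rsvd_bits_mask table is indexed by bit 7 of the
|       |  * pte (the PS/PAT bit) and by the paging level of the entry.
|       |  */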
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3917 | static bool |
| 3918 | __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) |
| 3919 | { |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3920 | int bit7 = (pte >> 7) & 1; |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3921 | |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3922 | return pte & rsvd_check->rsvd_bits_mask[bit7][level-1]; |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3923 | } |
| 3924 | |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3925 | static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte) |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3926 | { |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3927 | return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f); |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3928 | } |
| 3929 | |
Takuya Yoshikawa | ded5874 | 2016-02-22 17:23:40 +0900 | [diff] [blame] | 3930 | static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3931 | { |
Paolo Bonzini | 9034e6e | 2017-08-17 18:36:58 +0200 | [diff] [blame] | 3932 | /* |
| 3933 | * A nested guest cannot use the MMIO cache if it is using nested |
|  3934 | 	 * page tables, because cr2 is an nGPA while the cache stores GPAs.
| 3935 | */ |
| 3936 | if (mmu_is_nested(vcpu)) |
| 3937 | return false; |
| 3938 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3939 | if (direct) |
| 3940 | return vcpu_match_mmio_gpa(vcpu, addr); |
| 3941 | |
| 3942 | return vcpu_match_mmio_gva(vcpu, addr); |
| 3943 | } |
| 3944 | |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] |  3945 | /* return true if a reserved bit is detected on the spte. */
| 3946 | static bool |
| 3947 | walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3948 | { |
| 3949 | struct kvm_shadow_walk_iterator iterator; |
Yu Zhang | 2a7266a | 2017-08-24 20:27:54 +0800 | [diff] [blame] | 3950 | u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull; |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3951 | struct rsvd_bits_validate *rsvd_check; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3952 | int root, leaf; |
| 3953 | bool reserved = false; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3954 | |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3955 | rsvd_check = &vcpu->arch.mmu->shadow_zero_check; |
Marcelo Tosatti | 37f6a4e | 2014-01-03 17:09:32 -0200 | [diff] [blame] | 3956 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3957 | walk_shadow_page_lockless_begin(vcpu); |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3958 | |
Paolo Bonzini | 29ecd66 | 2015-09-06 16:24:50 +0200 | [diff] [blame] | 3959 | for (shadow_walk_init(&iterator, vcpu, addr), |
| 3960 | leaf = root = iterator.level; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3961 | shadow_walk_okay(&iterator); |
| 3962 | __shadow_walk_next(&iterator, spte)) { |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3963 | spte = mmu_spte_get_lockless(iterator.sptep); |
| 3964 | |
| 3965 | sptes[leaf - 1] = spte; |
Paolo Bonzini | 29ecd66 | 2015-09-06 16:24:50 +0200 | [diff] [blame] | 3966 | leaf--; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3967 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3968 | if (!is_shadow_present_pte(spte)) |
| 3969 | break; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3970 | |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3971 | /* |
| 3972 | * Use a bitwise-OR instead of a logical-OR to aggregate the |
| 3973 | * reserved bit and EPT's invalid memtype/XWR checks to avoid |
| 3974 | * adding a Jcc in the loop. |
| 3975 | */ |
| 3976 | reserved |= __is_bad_mt_xwr(rsvd_check, spte) | |
| 3977 | __is_rsvd_bits_set(rsvd_check, spte, iterator.level); |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3978 | } |
| 3979 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3980 | walk_shadow_page_lockless_end(vcpu); |
| 3981 | |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3982 | if (reserved) { |
| 3983 | pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n", |
| 3984 | __func__, addr); |
Paolo Bonzini | 29ecd66 | 2015-09-06 16:24:50 +0200 | [diff] [blame] | 3985 | while (root > leaf) { |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3986 | pr_err("------ spte 0x%llx level %d.\n", |
| 3987 | sptes[root - 1], root); |
| 3988 | root--; |
| 3989 | } |
| 3990 | } |
Sean Christopherson | ddce620 | 2019-12-06 15:57:27 -0800 | [diff] [blame] | 3991 | |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3992 | *sptep = spte; |
| 3993 | return reserved; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3994 | } |
| 3995 | |
Paolo Bonzini | e08d26f | 2017-08-17 18:36:56 +0200 | [diff] [blame] | 3996 | static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3997 | { |
| 3998 | u64 spte; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3999 | bool reserved; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4000 | |
Takuya Yoshikawa | ded5874 | 2016-02-22 17:23:40 +0900 | [diff] [blame] | 4001 | if (mmio_info_in_cache(vcpu, addr, direct)) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 4002 | return RET_PF_EMULATE; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4003 | |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 4004 | reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); |
Paolo Bonzini | 450869d | 2015-11-04 13:41:21 +0100 | [diff] [blame] | 4005 | if (WARN_ON(reserved)) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 4006 | return -EINVAL; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4007 | |
| 4008 | if (is_mmio_spte(spte)) { |
| 4009 | gfn_t gfn = get_mmio_spte_gfn(spte); |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 4010 | unsigned int access = get_mmio_spte_access(spte); |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4011 | |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 4012 | if (!check_mmio_spte(vcpu, spte)) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 4013 | return RET_PF_INVALID; |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 4014 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4015 | if (direct) |
| 4016 | addr = 0; |
Xiao Guangrong | 4f02264 | 2011-07-12 03:34:24 +0800 | [diff] [blame] | 4017 | |
| 4018 | trace_handle_mmio_page_fault(addr, gfn, access); |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4019 | vcpu_cache_mmio_info(vcpu, addr, gfn, access); |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 4020 | return RET_PF_EMULATE; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4021 | } |
| 4022 | |
| 4023 | /* |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] |  4024 | 	 * If the page table is zapped by other CPUs, let the CPU fault again on
| 4025 | * the address. |
| 4026 | */ |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 4027 | return RET_PF_RETRY; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4028 | } |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4029 | |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 4030 | static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, |
| 4031 | u32 error_code, gfn_t gfn) |
| 4032 | { |
| 4033 | if (unlikely(error_code & PFERR_RSVD_MASK)) |
| 4034 | return false; |
| 4035 | |
| 4036 | if (!(error_code & PFERR_PRESENT_MASK) || |
| 4037 | !(error_code & PFERR_WRITE_MASK)) |
| 4038 | return false; |
| 4039 | |
| 4040 | /* |
|  4041 | 	 * The guest is writing a page which is write-tracked, which cannot
|  4042 | 	 * be fixed by the page fault handler.
| 4043 | */ |
| 4044 | if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) |
| 4045 | return true; |
| 4046 | |
| 4047 | return false; |
| 4048 | } |
| 4049 | |
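|       | /*
|       |  * Locklessly walk the shadow page tables for @addr and reset the
|       |  * write-flooding count of every present shadow page along the walk.
|       |  */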
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 4050 | static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) |
| 4051 | { |
| 4052 | struct kvm_shadow_walk_iterator iterator; |
| 4053 | u64 spte; |
| 4054 | |
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 4055 | walk_shadow_page_lockless_begin(vcpu); |
| 4056 | for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { |
| 4057 | clear_sp_write_flooding_count(iterator.sptep); |
| 4058 | if (!is_shadow_present_pte(spte)) |
| 4059 | break; |
| 4060 | } |
| 4061 | walk_shadow_page_lockless_end(vcpu); |
| 4062 | } |
| 4063 | |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 4064 | static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, |
| 4065 | gfn_t gfn) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4066 | { |
| 4067 | struct kvm_arch_async_pf arch; |
Xiao Guangrong | fb67e14 | 2010-12-07 10:35:25 +0800 | [diff] [blame] | 4068 | |
Gleb Natapov | 7c90705 | 2010-10-14 11:22:53 +0200 | [diff] [blame] | 4069 | arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4070 | arch.gfn = gfn; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 4071 | arch.direct_map = vcpu->arch.mmu->direct_map; |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 4072 | arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4073 | |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 4074 | return kvm_setup_async_pf(vcpu, cr2_or_gpa, |
| 4075 | kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4076 | } |
| 4077 | |
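|       | /*
|       |  * Resolve @gfn to a host pfn, possibly via the async page fault machinery.
|       |  * Returns true if the fault should be retried because an async page fault
|       |  * was set up (or the vCPU was told to halt waiting for one), false if *pfn
|       |  * holds the result and fault handling can continue.
|       |  */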
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 4078 | static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 4079 | gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write, |
| 4080 | bool *writable) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4081 | { |
Paolo Bonzini | 3520469 | 2015-04-02 11:20:48 +0200 | [diff] [blame] | 4082 | struct kvm_memory_slot *slot; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4083 | bool async; |
| 4084 | |
Jim Mattson | 3a2936d | 2018-05-09 17:02:05 -0400 | [diff] [blame] | 4085 | /* |
| 4086 | * Don't expose private memslots to L2. |
| 4087 | */ |
| 4088 | if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) { |
| 4089 | *pfn = KVM_PFN_NOSLOT; |
| 4090 | return false; |
| 4091 | } |
| 4092 | |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 4093 | slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
Paolo Bonzini | 3520469 | 2015-04-02 11:20:48 +0200 | [diff] [blame] | 4094 | async = false; |
| 4095 | *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4096 | if (!async) |
| 4097 | return false; /* *pfn has correct page already */ |
| 4098 | |
Wanpeng Li | 9bc1f09 | 2017-06-08 20:13:40 -0700 | [diff] [blame] | 4099 | if (!prefault && kvm_can_do_async_pf(vcpu)) { |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 4100 | trace_kvm_try_async_get_page(cr2_or_gpa, gfn); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4101 | if (kvm_find_async_pf_gfn(vcpu, gfn)) { |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 4102 | trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4103 | kvm_make_request(KVM_REQ_APF_HALT, vcpu); |
| 4104 | return true; |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 4105 | } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn)) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4106 | return true; |
| 4107 | } |
| 4108 | |
Paolo Bonzini | 3520469 | 2015-04-02 11:20:48 +0200 | [diff] [blame] | 4109 | *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4110 | return false; |
| 4111 | } |
| 4112 | |
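|       | /*
|       |  * Common fault handler for direct (non-shadowed) MMUs, shared by the
|       |  * nonpaging and TDP page fault paths.
|       |  */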
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4113 | static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, |
| 4114 | bool prefault, int max_level, bool is_tdp) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4115 | { |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 4116 | bool write = error_code & PFERR_WRITE_MASK; |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 4117 | bool exec = error_code & PFERR_FETCH_MASK; |
| 4118 | bool lpage_disallowed = exec && is_nx_huge_page_enabled(); |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4119 | bool map_writable; |
Avi Kivity | ebeace8 | 2007-01-05 16:36:47 -0800 | [diff] [blame] | 4120 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4121 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 4122 | unsigned long mmu_seq; |
| 4123 | kvm_pfn_t pfn; |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 4124 | int r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4125 | |
| 4126 | if (page_fault_handle_page_track(vcpu, error_code, gfn)) |
| 4127 | return RET_PF_EMULATE; |
| 4128 | |
| 4129 | r = mmu_topup_memory_caches(vcpu); |
| 4130 | if (r) |
| 4131 | return r; |
| 4132 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4133 | if (lpage_disallowed) |
| 4134 | max_level = PT_PAGE_TABLE_LEVEL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4135 | |
Sean Christopherson | f9fa250 | 2020-01-08 12:24:42 -0800 | [diff] [blame] | 4136 | if (fast_page_fault(vcpu, gpa, error_code)) |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 4137 | return RET_PF_RETRY; |
| 4138 | |
| 4139 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
| 4140 | smp_rmb(); |
| 4141 | |
| 4142 | if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) |
| 4143 | return RET_PF_RETRY; |
| 4144 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4145 | if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r)) |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 4146 | return r; |
| 4147 | |
| 4148 | r = RET_PF_RETRY; |
| 4149 | spin_lock(&vcpu->kvm->mmu_lock); |
| 4150 | if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) |
| 4151 | goto out_unlock; |
| 4152 | if (make_mmu_pages_available(vcpu) < 0) |
| 4153 | goto out_unlock; |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 4154 | r = __direct_map(vcpu, gpa, write, map_writable, max_level, pfn, |
Sean Christopherson | 4cd071d | 2019-12-06 15:57:26 -0800 | [diff] [blame] | 4155 | prefault, is_tdp && lpage_disallowed); |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4156 | |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 4157 | out_unlock: |
| 4158 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 4159 | kvm_release_pfn_clean(pfn); |
| 4160 | return r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4161 | } |
| 4162 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4163 | static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, |
| 4164 | u32 error_code, bool prefault) |
| 4165 | { |
|  4166 | 	pgprintk("%s: gpa %llx error %x\n", __func__, gpa, error_code);
| 4167 | |
| 4168 | /* This path builds a PAE pagetable, we can map 2mb pages at maximum. */ |
| 4169 | return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault, |
| 4170 | PT_DIRECTORY_LEVEL, false); |
| 4171 | } |
| 4172 | |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4173 | int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 4174 | u64 fault_address, char *insn, int insn_len) |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4175 | { |
| 4176 | int r = 1; |
| 4177 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 4178 | #ifndef CONFIG_X86_64 |
| 4179 | /* A 64-bit CR2 should be impossible on 32-bit KVM. */ |
| 4180 | if (WARN_ON_ONCE(fault_address >> 32)) |
| 4181 | return -EFAULT; |
| 4182 | #endif |
| 4183 | |
Paolo Bonzini | c595cee | 2018-07-02 13:07:14 +0200 | [diff] [blame] | 4184 | vcpu->arch.l1tf_flush_l1d = true; |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4185 | switch (vcpu->arch.apf.host_apf_reason) { |
| 4186 | default: |
| 4187 | trace_kvm_page_fault(fault_address, error_code); |
| 4188 | |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 4189 | if (kvm_event_needs_reinjection(vcpu)) |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4190 | kvm_mmu_unprotect_page_virt(vcpu, fault_address); |
| 4191 | r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn, |
| 4192 | insn_len); |
| 4193 | break; |
| 4194 | case KVM_PV_REASON_PAGE_NOT_PRESENT: |
| 4195 | vcpu->arch.apf.host_apf_reason = 0; |
| 4196 | local_irq_disable(); |
Boqun Feng | a2b7861 | 2017-10-03 21:36:51 +0800 | [diff] [blame] | 4197 | kvm_async_pf_task_wait(fault_address, 0); |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4198 | local_irq_enable(); |
| 4199 | break; |
| 4200 | case KVM_PV_REASON_PAGE_READY: |
| 4201 | vcpu->arch.apf.host_apf_reason = 0; |
| 4202 | local_irq_disable(); |
| 4203 | kvm_async_pf_task_wake(fault_address); |
| 4204 | local_irq_enable(); |
| 4205 | break; |
| 4206 | } |
| 4207 | return r; |
| 4208 | } |
| 4209 | EXPORT_SYMBOL_GPL(kvm_handle_page_fault); |
| 4210 | |
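|       | /*
|       |  * TDP (EPT/NPT) page fault handler: cap the mapping level at the largest
|       |  * page size that is MTRR-consistent for the faulting range, then defer to
|       |  * the common direct fault path.
|       |  */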
Sean Christopherson | 7a02674 | 2020-02-06 14:14:34 -0800 | [diff] [blame] | 4211 | int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, |
| 4212 | bool prefault) |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4213 | { |
Sean Christopherson | cb9b88c | 2019-12-06 15:57:18 -0800 | [diff] [blame] | 4214 | int max_level; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4215 | |
Sean Christopherson | cb9b88c | 2019-12-06 15:57:18 -0800 | [diff] [blame] | 4216 | for (max_level = PT_MAX_HUGEPAGE_LEVEL; |
| 4217 | max_level > PT_PAGE_TABLE_LEVEL; |
| 4218 | max_level--) { |
| 4219 | int page_num = KVM_PAGES_PER_HPAGE(max_level); |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4220 | gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4221 | |
Sean Christopherson | cb9b88c | 2019-12-06 15:57:18 -0800 | [diff] [blame] | 4222 | if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num)) |
| 4223 | break; |
Takuya Yoshikawa | fd13690 | 2015-10-16 17:06:02 +0900 | [diff] [blame] | 4224 | } |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 4225 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4226 | return direct_page_fault(vcpu, gpa, error_code, prefault, |
| 4227 | max_level, true); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4228 | } |
| 4229 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4230 | static void nonpaging_init_context(struct kvm_vcpu *vcpu, |
| 4231 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4232 | { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4233 | context->page_fault = nonpaging_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4234 | context->gva_to_gpa = nonpaging_gva_to_gpa; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 4235 | context->sync_page = nonpaging_sync_page; |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 4236 | context->invlpg = NULL; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 4237 | context->update_pte = nonpaging_update_pte; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 4238 | context->root_level = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4239 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 4240 | context->direct_map = true; |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4241 | context->nx = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4242 | } |
| 4243 | |
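|       | /*
|       |  * A cached root is reusable only if it is a valid root backed by a shadow
|       |  * page whose role matches, and, for non-direct roots, whose CR3 matches.
|       |  */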
Sean Christopherson | 0be4435 | 2020-02-28 14:52:40 -0800 | [diff] [blame] | 4244 | static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t cr3, |
| 4245 | union kvm_mmu_page_role role) |
| 4246 | { |
| 4247 | return (role.direct || cr3 == root->cr3) && |
| 4248 | VALID_PAGE(root->hpa) && page_header(root->hpa) && |
| 4249 | role.word == page_header(root->hpa)->role.word; |
| 4250 | } |
| 4251 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4252 | /* |
| 4253 | * Find out if a previously cached root matching the new CR3/role is available. |
| 4254 | * The current root is also inserted into the cache. |
| 4255 | * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is |
| 4256 | * returned. |
| 4257 | * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and |
| 4258 | * false is returned. This root should now be freed by the caller. |
| 4259 | */ |
| 4260 | static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, |
| 4261 | union kvm_mmu_page_role new_role) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4262 | { |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4263 | uint i; |
| 4264 | struct kvm_mmu_root_info root; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 4265 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4266 | |
Vitaly Kuznetsov | ad7dc69 | 2019-02-22 17:45:01 +0100 | [diff] [blame] | 4267 | root.cr3 = mmu->root_cr3; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4268 | root.hpa = mmu->root_hpa; |
| 4269 | |
Sean Christopherson | 0be4435 | 2020-02-28 14:52:40 -0800 | [diff] [blame] | 4270 | if (is_root_usable(&root, new_cr3, new_role)) |
| 4271 | return true; |
| 4272 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4273 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { |
| 4274 | swap(root, mmu->prev_roots[i]); |
| 4275 | |
Sean Christopherson | 0be4435 | 2020-02-28 14:52:40 -0800 | [diff] [blame] | 4276 | if (is_root_usable(&root, new_cr3, new_role)) |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4277 | break; |
| 4278 | } |
| 4279 | |
| 4280 | mmu->root_hpa = root.hpa; |
Vitaly Kuznetsov | ad7dc69 | 2019-02-22 17:45:01 +0100 | [diff] [blame] | 4281 | mmu->root_cr3 = root.cr3; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4282 | |
| 4283 | return i < KVM_MMU_NUM_PREV_ROOTS; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4284 | } |
| 4285 | |
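/*
 * Illustrative walk of the swap() loop above, assuming the usual
 * KVM_MMU_NUM_PREV_ROOTS == 3: the outgoing current root is pushed into
 * prev_roots[0], the old prev_roots[0] moves to prev_roots[1], and so on,
 * until a usable root is found and left in "root".  If nothing matches,
 * the loop runs to completion, the least-recently-used entry ends up in
 * "root", and the function returns false so the caller frees it.
 */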
Junaid Shahid | 0aab33e | 2018-06-27 14:59:09 -0700 | [diff] [blame] | 4286 | static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3, |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame^] | 4287 | union kvm_mmu_page_role new_role) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4288 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 4289 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 4290 | |
| 4291 | /* |
| 4292 | * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid |
| 4293 | * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs |
| 4294 | * later if necessary. |
| 4295 | */ |
| 4296 | if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame^] | 4297 | mmu->root_level >= PT64_ROOT_4LEVEL) |
| 4298 | return !mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT) && |
| 4299 | cached_root_available(vcpu, new_cr3, new_role); |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 4300 | |
| 4301 | return false; |
| 4302 | } |
| 4303 | |
Junaid Shahid | 0aab33e | 2018-06-27 14:59:09 -0700 | [diff] [blame] | 4304 | static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, |
Junaid Shahid | ade61e2 | 2018-06-27 14:59:15 -0700 | [diff] [blame] | 4305 | union kvm_mmu_page_role new_role, |
| 4306 | bool skip_tlb_flush) |
Junaid Shahid | 0aab33e | 2018-06-27 14:59:09 -0700 | [diff] [blame] | 4307 | { |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame^] | 4308 | if (!fast_cr3_switch(vcpu, new_cr3, new_role)) { |
| 4309 | kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT); |
| 4310 | return; |
| 4311 | } |
| 4312 | |
| 4313 | /* |
| 4314 | * It's possible that the cached previous root page is obsolete because |
| 4315 | * of a change in the MMU generation number. However, changing the |
| 4316 | * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will |
| 4317 | * free the root set here and allocate a new one. |
| 4318 | */ |
| 4319 | kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); |
| 4320 | |
| 4321 | if (!skip_tlb_flush) { |
| 4322 | kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); |
| 4323 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
| 4324 | } |
| 4325 | |
| 4326 | /* |
| 4327 | * The last MMIO access's GVA and GPA are cached in the VCPU. When |
| 4328 | * switching to a new CR3, that GVA->GPA mapping may no longer be |
| 4329 | * valid. So clear any cached MMIO info even when we don't need to sync |
| 4330 | * the shadow page tables. |
| 4331 | */ |
| 4332 | vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); |
| 4333 | |
| 4334 | __clear_sp_write_flooding_count(page_header(vcpu->arch.mmu->root_hpa)); |
Junaid Shahid | 0aab33e | 2018-06-27 14:59:09 -0700 | [diff] [blame] | 4335 | } |
| 4336 | |
Junaid Shahid | ade61e2 | 2018-06-27 14:59:15 -0700 | [diff] [blame] | 4337 | void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush) |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 4338 | { |
Junaid Shahid | ade61e2 | 2018-06-27 14:59:15 -0700 | [diff] [blame] | 4339 | __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu), |
| 4340 | skip_tlb_flush); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4341 | } |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 4342 | EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4343 | |
Joerg Roedel | 5777ed3 | 2010-09-10 17:30:42 +0200 | [diff] [blame] | 4344 | static unsigned long get_cr3(struct kvm_vcpu *vcpu) |
| 4345 | { |
Avi Kivity | 9f8fe50 | 2010-12-05 17:30:00 +0200 | [diff] [blame] | 4346 | return kvm_read_cr3(vcpu); |
Joerg Roedel | 5777ed3 | 2010-09-10 17:30:42 +0200 | [diff] [blame] | 4347 | } |
| 4348 | |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 4349 | static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 4350 | unsigned int access, int *nr_present) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4351 | { |
| 4352 | if (unlikely(is_mmio_spte(*sptep))) { |
| 4353 | if (gfn != get_mmio_spte_gfn(*sptep)) { |
| 4354 | mmu_spte_clear_no_track(sptep); |
| 4355 | return true; |
| 4356 | } |
| 4357 | |
| 4358 | (*nr_present)++; |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 4359 | mark_mmio_spte(vcpu, sptep, gfn, access); |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4360 | return true; |
| 4361 | } |
| 4362 | |
| 4363 | return false; |
| 4364 | } |
| 4365 | |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4366 | static inline bool is_last_gpte(struct kvm_mmu *mmu, |
| 4367 | unsigned level, unsigned gpte) |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4368 | { |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4369 | /* |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4370 | * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. |
| 4371 | * If it is clear, there are no large pages at this level, so clear |
| 4372 | * PT_PAGE_SIZE_MASK in gpte if that is the case. |
| 4373 | */ |
| 4374 | gpte &= level - mmu->last_nonleaf_level; |
| 4375 | |
Ladi Prosek | 829ee27 | 2017-10-05 11:10:23 +0200 | [diff] [blame] | 4376 | /* |
| 4377 | * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set |
| 4378 | * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means |
| 4379 | * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then. |
| 4380 | */ |
| 4381 | gpte |= level - PT_PAGE_TABLE_LEVEL - 1; |
| 4382 | |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4383 | return gpte & PT_PAGE_SIZE_MASK; |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4384 | } |
| 4385 | |
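/*
 * Worked example of the bit trick in is_last_gpte(), assuming a 64-bit
 * guest with four-level paging (mmu->last_nonleaf_level == 4) and
 * PT_PAGE_SIZE_MASK == bit 7:
 *
 *   level 4: gpte &= (4 - 4) == 0          -> PS stripped, never a leaf
 *   level 3: gpte &= (3 - 4), bit 7 set    -> PS preserved (1GB page)
 *   level 2: gpte &= (2 - 4), bit 7 set    -> PS preserved (2MB page)
 *   level 1: gpte |= (1 - 1 - 1), bit 7 set -> always treated as a leaf
 */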
Nadav Har'El | 37406aa | 2013-08-05 11:07:12 +0300 | [diff] [blame] | 4386 | #define PTTYPE_EPT 18 /* arbitrary */ |
| 4387 | #define PTTYPE PTTYPE_EPT |
| 4388 | #include "paging_tmpl.h" |
| 4389 | #undef PTTYPE |
| 4390 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4391 | #define PTTYPE 64 |
| 4392 | #include "paging_tmpl.h" |
| 4393 | #undef PTTYPE |
| 4394 | |
| 4395 | #define PTTYPE 32 |
| 4396 | #include "paging_tmpl.h" |
| 4397 | #undef PTTYPE |
| 4398 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4399 | static void |
| 4400 | __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, |
| 4401 | struct rsvd_bits_validate *rsvd_check, |
| 4402 | int maxphyaddr, int level, bool nx, bool gbpages, |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4403 | bool pse, bool amd) |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4404 | { |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4405 | u64 exb_bit_rsvd = 0; |
Nadav Amit | 5f7dde7 | 2014-05-07 15:32:50 +0300 | [diff] [blame] | 4406 | u64 gbpages_bit_rsvd = 0; |
Paolo Bonzini | a0c0feb | 2014-09-02 13:24:12 +0200 | [diff] [blame] | 4407 | u64 nonleaf_bit8_rsvd = 0; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4408 | |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4409 | rsvd_check->bad_mt_xwr = 0; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4410 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4411 | if (!nx) |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4412 | exb_bit_rsvd = rsvd_bits(63, 63); |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4413 | if (!gbpages) |
Nadav Amit | 5f7dde7 | 2014-05-07 15:32:50 +0300 | [diff] [blame] | 4414 | gbpages_bit_rsvd = rsvd_bits(7, 7); |
Paolo Bonzini | a0c0feb | 2014-09-02 13:24:12 +0200 | [diff] [blame] | 4415 | |
| 4416 | /* |
| 4417 | * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for |
| 4418 | * leaf entries) on AMD CPUs only. |
| 4419 | */ |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4420 | if (amd) |
Paolo Bonzini | a0c0feb | 2014-09-02 13:24:12 +0200 | [diff] [blame] | 4421 | nonleaf_bit8_rsvd = rsvd_bits(8, 8); |
| 4422 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4423 | switch (level) { |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4424 | case PT32_ROOT_LEVEL: |
| 4425 | /* no rsvd bits for 2 level 4K page table entries */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4426 | rsvd_check->rsvd_bits_mask[0][1] = 0; |
| 4427 | rsvd_check->rsvd_bits_mask[0][0] = 0; |
| 4428 | rsvd_check->rsvd_bits_mask[1][0] = |
| 4429 | rsvd_check->rsvd_bits_mask[0][0]; |
Xiao Guangrong | f815bce | 2010-03-19 17:58:53 +0800 | [diff] [blame] | 4430 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4431 | if (!pse) { |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4432 | rsvd_check->rsvd_bits_mask[1][1] = 0; |
Xiao Guangrong | f815bce | 2010-03-19 17:58:53 +0800 | [diff] [blame] | 4433 | break; |
| 4434 | } |
| 4435 | |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4436 | if (is_cpuid_PSE36()) |
| 4437 | /* 36bits PSE 4MB page */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4438 | rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4439 | else |
| 4440 | /* 32 bits PSE 4MB page */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4441 | rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4442 | break; |
| 4443 | case PT32E_ROOT_LEVEL: |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4444 | rsvd_check->rsvd_bits_mask[0][2] = |
Dong, Eddie | 20c466b | 2009-03-31 23:03:45 +0800 | [diff] [blame] | 4445 | rsvd_bits(maxphyaddr, 63) | |
Nadav Amit | cd9ae5f | 2014-04-04 06:31:04 +0300 | [diff] [blame] | 4446 | rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4447 | rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | |
Sheng Yang | 4c26b4c | 2009-04-02 10:28:37 +0800 | [diff] [blame] | 4448 | rsvd_bits(maxphyaddr, 62); /* PDE */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4449 | rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4450 | rsvd_bits(maxphyaddr, 62); /* PTE */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4451 | rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4452 | rsvd_bits(maxphyaddr, 62) | |
| 4453 | rsvd_bits(13, 20); /* large page */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4454 | rsvd_check->rsvd_bits_mask[1][0] = |
| 4455 | rsvd_check->rsvd_bits_mask[0][0]; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4456 | break; |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4457 | case PT64_ROOT_5LEVEL: |
| 4458 | rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd | |
| 4459 | nonleaf_bit8_rsvd | rsvd_bits(7, 7) | |
| 4460 | rsvd_bits(maxphyaddr, 51); |
| 4461 | rsvd_check->rsvd_bits_mask[1][4] = |
| 4462 | rsvd_check->rsvd_bits_mask[0][4]; |
Gustavo A. R. Silva | b2869f2 | 2019-01-25 12:23:17 -0600 | [diff] [blame] | 4463 | /* fall through */ |
Yu Zhang | 2a7266a | 2017-08-24 20:27:54 +0800 | [diff] [blame] | 4464 | case PT64_ROOT_4LEVEL: |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4465 | rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | |
| 4466 | nonleaf_bit8_rsvd | rsvd_bits(7, 7) | |
Sheng Yang | 4c26b4c | 2009-04-02 10:28:37 +0800 | [diff] [blame] | 4467 | rsvd_bits(maxphyaddr, 51); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4468 | rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | |
| 4469 | nonleaf_bit8_rsvd | gbpages_bit_rsvd | |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4470 | rsvd_bits(maxphyaddr, 51); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4471 | rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | |
| 4472 | rsvd_bits(maxphyaddr, 51); |
| 4473 | rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | |
| 4474 | rsvd_bits(maxphyaddr, 51); |
| 4475 | rsvd_check->rsvd_bits_mask[1][3] = |
| 4476 | rsvd_check->rsvd_bits_mask[0][3]; |
| 4477 | rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd | |
Nadav Amit | 5f7dde7 | 2014-05-07 15:32:50 +0300 | [diff] [blame] | 4478 | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) | |
Joerg Roedel | e04da98 | 2009-07-27 16:30:45 +0200 | [diff] [blame] | 4479 | rsvd_bits(13, 29); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4480 | rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | |
Sheng Yang | 4c26b4c | 2009-04-02 10:28:37 +0800 | [diff] [blame] | 4481 | rsvd_bits(maxphyaddr, 51) | |
| 4482 | rsvd_bits(13, 20); /* large page */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4483 | rsvd_check->rsvd_bits_mask[1][0] = |
| 4484 | rsvd_check->rsvd_bits_mask[0][0]; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4485 | break; |
| 4486 | } |
| 4487 | } |
| 4488 | |
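/*
 * Worked example of the masks built above, assuming rsvd_bits(s, e)
 * (defined in mmu.h) returns a mask with bits s..e set: for a guest
 * MAXPHYADDR of 40, rsvd_bits(40, 51) == 0x000fff0000000000ULL, i.e.
 * physical-address bits 40..51 must be zero in every guest PTE, and
 * rsvd_bits(13, 20) marks the low frame bits that must be zero in a
 * 2MB leaf PDE.
 */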
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4489 | static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, |
| 4490 | struct kvm_mmu *context) |
| 4491 | { |
| 4492 | __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, |
| 4493 | cpuid_maxphyaddr(vcpu), context->root_level, |
Radim Krčmář | d6321d4 | 2017-08-05 00:12:49 +0200 | [diff] [blame] | 4494 | context->nx, |
| 4495 | guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), |
Sean Christopherson | 23493d0 | 2020-03-04 17:34:33 -0800 | [diff] [blame] | 4496 | is_pse(vcpu), |
| 4497 | guest_cpuid_is_amd_or_hygon(vcpu)); |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4498 | } |
| 4499 | |
Xiao Guangrong | 81b8eeb | 2015-08-05 12:04:23 +0800 | [diff] [blame] | 4500 | static void |
| 4501 | __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, |
| 4502 | int maxphyaddr, bool execonly) |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4503 | { |
Paolo Bonzini | 951f9fd | 2015-09-23 10:34:26 +0200 | [diff] [blame] | 4504 | u64 bad_mt_xwr; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4505 | |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4506 | rsvd_check->rsvd_bits_mask[0][4] = |
| 4507 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4508 | rsvd_check->rsvd_bits_mask[0][3] = |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4509 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4510 | rsvd_check->rsvd_bits_mask[0][2] = |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4511 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4512 | rsvd_check->rsvd_bits_mask[0][1] = |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4513 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4514 | rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4515 | |
| 4516 | /* large page */ |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4517 | rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4518 | rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; |
| 4519 | rsvd_check->rsvd_bits_mask[1][2] = |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4520 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4521 | rsvd_check->rsvd_bits_mask[1][1] = |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4522 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4523 | rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4524 | |
Paolo Bonzini | 951f9fd | 2015-09-23 10:34:26 +0200 | [diff] [blame] | 4525 | bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ |
| 4526 | bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */ |
| 4527 | bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */ |
| 4528 | bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */ |
| 4529 | bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */ |
| 4530 | if (!execonly) { |
| 4531 | /* bits 0..2 must not be 100 unless VMX capabilities allow it */ |
| 4532 | bad_mt_xwr |= REPEAT_BYTE(1ull << 4); |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4533 | } |
Paolo Bonzini | 951f9fd | 2015-09-23 10:34:26 +0200 | [diff] [blame] | 4534 | rsvd_check->bad_mt_xwr = bad_mt_xwr; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4535 | } |
| 4536 | |
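/*
 * Sketch of how bad_mt_xwr is consumed (the exact helper lives elsewhere
 * in the MMU code): the low 6 bits of an EPT PTE (XWR in bits 0..2,
 * memory type in bits 3..5) form a bit index into bad_mt_xwr; if that bit
 * is set, the combination is reserved.  For example a write-only PTE has
 * (pte & 7) == 2, and REPEAT_BYTE(1ull << 2) above sets bit 2 of every
 * byte (bits 2, 10, 18, ...), so the PTE is rejected for any memory type.
 */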
Xiao Guangrong | 81b8eeb | 2015-08-05 12:04:23 +0800 | [diff] [blame] | 4537 | static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, |
| 4538 | struct kvm_mmu *context, bool execonly) |
| 4539 | { |
| 4540 | __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, |
| 4541 | cpuid_maxphyaddr(vcpu), execonly); |
| 4542 | } |
| 4543 | |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4544 | /* |
| 4545 |  * The page table on the host is the shadow page table for the page
| 4546 |  * table in the guest or an AMD nested guest; its MMU features
| 4547 |  * completely follow the features in the guest.

| 4548 | */ |
| 4549 | void |
| 4550 | reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) |
| 4551 | { |
Vitaly Kuznetsov | 36d9594d | 2018-10-08 21:28:10 +0200 | [diff] [blame] | 4552 | bool uses_nx = context->nx || |
| 4553 | context->mmu_role.base.smep_andnot_wp; |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4554 | struct rsvd_bits_validate *shadow_zero_check; |
| 4555 | int i; |
Paolo Bonzini | 5f0b819 | 2016-03-09 14:28:02 +0100 | [diff] [blame] | 4556 | |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4557 | /* |
| 4558 | * Passing "true" to the last argument is okay; it adds a check |
| 4559 | * on bit 8 of the SPTEs which KVM doesn't use anyway. |
| 4560 | */ |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4561 | shadow_zero_check = &context->shadow_zero_check; |
| 4562 | __reset_rsvds_bits_mask(vcpu, shadow_zero_check, |
Kai Huang | f3ecb59 | 2019-05-03 03:08:53 -0700 | [diff] [blame] | 4563 | shadow_phys_bits, |
Paolo Bonzini | 5f0b819 | 2016-03-09 14:28:02 +0100 | [diff] [blame] | 4564 | context->shadow_root_level, uses_nx, |
Radim Krčmář | d6321d4 | 2017-08-05 00:12:49 +0200 | [diff] [blame] | 4565 | guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), |
| 4566 | is_pse(vcpu), true); |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4567 | |
| 4568 | if (!shadow_me_mask) |
| 4569 | return; |
| 4570 | |
| 4571 | for (i = context->shadow_root_level; --i >= 0;) { |
| 4572 | shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; |
| 4573 | shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; |
| 4574 | } |
| 4575 | |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4576 | } |
| 4577 | EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); |
| 4578 | |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4579 | static inline bool boot_cpu_is_amd(void) |
| 4580 | { |
| 4581 | WARN_ON_ONCE(!tdp_enabled); |
| 4582 | return shadow_x_mask == 0; |
| 4583 | } |
| 4584 | |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4585 | /* |
| 4586 |  * The direct page table on the host uses as many MMU features as
| 4587 |  * possible; however, KVM currently does not do execution-protection.
| 4588 | */ |
| 4589 | static void |
| 4590 | reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, |
| 4591 | struct kvm_mmu *context) |
| 4592 | { |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4593 | struct rsvd_bits_validate *shadow_zero_check; |
| 4594 | int i; |
| 4595 | |
| 4596 | shadow_zero_check = &context->shadow_zero_check; |
| 4597 | |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4598 | if (boot_cpu_is_amd()) |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4599 | __reset_rsvds_bits_mask(vcpu, shadow_zero_check, |
Kai Huang | f3ecb59 | 2019-05-03 03:08:53 -0700 | [diff] [blame] | 4600 | shadow_phys_bits, |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4601 | context->shadow_root_level, false, |
Borislav Petkov | b8291adc | 2016-03-29 17:41:58 +0200 | [diff] [blame] | 4602 | boot_cpu_has(X86_FEATURE_GBPAGES), |
| 4603 | true, true); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4604 | else |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4605 | __reset_rsvds_bits_mask_ept(shadow_zero_check, |
Kai Huang | f3ecb59 | 2019-05-03 03:08:53 -0700 | [diff] [blame] | 4606 | shadow_phys_bits, |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4607 | false); |
| 4608 | |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4609 | if (!shadow_me_mask) |
| 4610 | return; |
| 4611 | |
| 4612 | for (i = context->shadow_root_level; --i >= 0;) { |
| 4613 | shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; |
| 4614 | shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; |
| 4615 | } |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4616 | } |
| 4617 | |
| 4618 | /* |
| 4619 |  * Same as the comments in reset_shadow_zero_bits_mask(), except this
| 4620 |  * is the shadow page table for an Intel nested guest.
| 4621 | */ |
| 4622 | static void |
| 4623 | reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, |
| 4624 | struct kvm_mmu *context, bool execonly) |
| 4625 | { |
| 4626 | __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, |
Kai Huang | f3ecb59 | 2019-05-03 03:08:53 -0700 | [diff] [blame] | 4627 | shadow_phys_bits, execonly); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4628 | } |
| 4629 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4630 | #define BYTE_MASK(access) \ |
| 4631 | ((1 & (access) ? 2 : 0) | \ |
| 4632 | (2 & (access) ? 4 : 0) | \ |
| 4633 | (3 & (access) ? 8 : 0) | \ |
| 4634 | (4 & (access) ? 16 : 0) | \ |
| 4635 | (5 & (access) ? 32 : 0) | \ |
| 4636 | (6 & (access) ? 64 : 0) | \ |
| 4637 | (7 & (access) ? 128 : 0)) |
| 4638 | |
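/*
 * Worked expansion of BYTE_MASK(), assuming the usual access-bit values
 * ACC_EXEC_MASK == 1, ACC_WRITE_MASK == 2 and ACC_USER_MASK == 4: bit i of
 * BYTE_MASK(access) is set iff (i & access) != 0, where i is a 3-bit
 * user/write/exec combination.  So:
 *
 *   x = BYTE_MASK(ACC_EXEC_MASK)  == 0xaa  (combinations with X set)
 *   w = BYTE_MASK(ACC_WRITE_MASK) == 0xcc  (combinations with W set)
 *   u = BYTE_MASK(ACC_USER_MASK)  == 0xf0  (combinations with U set)
 */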
| 4639 | |
Xiao Guangrong | edc90b7 | 2015-05-11 22:55:21 +0800 | [diff] [blame] | 4640 | static void update_permission_bitmask(struct kvm_vcpu *vcpu, |
| 4641 | struct kvm_mmu *mmu, bool ept) |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4642 | { |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4643 | unsigned byte; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4644 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4645 | const u8 x = BYTE_MASK(ACC_EXEC_MASK); |
| 4646 | const u8 w = BYTE_MASK(ACC_WRITE_MASK); |
| 4647 | const u8 u = BYTE_MASK(ACC_USER_MASK); |
| 4648 | |
| 4649 | bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0; |
| 4650 | bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0; |
| 4651 | bool cr0_wp = is_write_protection(vcpu); |
| 4652 | |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4653 | for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4654 | unsigned pfec = byte << 1; |
| 4655 | |
Feng Wu | 97ec8c0 | 2014-04-01 17:46:34 +0800 | [diff] [blame] | 4656 | /* |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4657 | * Each "*f" variable has a 1 bit for each UWX value |
| 4658 | * that causes a fault with the given PFEC. |
Feng Wu | 97ec8c0 | 2014-04-01 17:46:34 +0800 | [diff] [blame] | 4659 | */ |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4660 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4661 | /* Faults from writes to non-writable pages */ |
Arnd Bergmann | a6a6d3b | 2019-07-12 11:12:30 +0200 | [diff] [blame] | 4662 | u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0; |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4663 | /* Faults from user mode accesses to supervisor pages */ |
Arnd Bergmann | a6a6d3b | 2019-07-12 11:12:30 +0200 | [diff] [blame] | 4664 | u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0; |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4665 | /* Faults from fetches of non-executable pages */
Arnd Bergmann | a6a6d3b | 2019-07-12 11:12:30 +0200 | [diff] [blame] | 4666 | u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0; |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4667 | /* Faults from kernel mode fetches of user pages */ |
| 4668 | u8 smepf = 0; |
| 4669 | /* Faults from kernel mode accesses of user pages */ |
| 4670 | u8 smapf = 0; |
Feng Wu | 97ec8c0 | 2014-04-01 17:46:34 +0800 | [diff] [blame] | 4671 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4672 | if (!ept) { |
| 4673 | /* Faults from kernel mode accesses to user pages */ |
| 4674 | u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4675 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4676 | /* Not really needed: !nx will cause pte.nx to fault */ |
| 4677 | if (!mmu->nx) |
| 4678 | ff = 0; |
| 4679 | |
| 4680 | /* Allow supervisor writes if !cr0.wp */ |
| 4681 | if (!cr0_wp) |
| 4682 | wf = (pfec & PFERR_USER_MASK) ? wf : 0; |
| 4683 | |
| 4684 | /* Disallow supervisor fetches of user code if cr4.smep */ |
| 4685 | if (cr4_smep) |
| 4686 | smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0; |
| 4687 | |
| 4688 | /* |
| 4689 | * SMAP:kernel-mode data accesses from user-mode |
| 4690 | * mappings should fault. A fault is considered |
| 4691 | * as a SMAP violation if all of the following |
Peng Hao | 39337ad | 2018-10-04 11:45:00 -0400 | [diff] [blame] | 4692 | * conditions are true: |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4693 | * - X86_CR4_SMAP is set in CR4 |
| 4694 | * - A user page is accessed |
| 4695 | * - The access is not a fetch |
| 4696 | * - Page fault in kernel mode |
| 4697 | * - if CPL = 3 or X86_EFLAGS_AC is clear |
| 4698 | * |
| 4699 |  * Here, we cover the first four conditions.
| 4700 |  * The fifth is computed dynamically in permission_fault();
| 4701 | * PFERR_RSVD_MASK bit will be set in PFEC if the access is |
| 4702 | * *not* subject to SMAP restrictions. |
| 4703 | */ |
| 4704 | if (cr4_smap) |
| 4705 | smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4706 | } |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4707 | |
| 4708 | mmu->permissions[byte] = ff | uf | wf | smepf | smapf; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4709 | } |
| 4710 | } |
| 4711 | |
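/*
 * Worked example of one permissions[] entry, using the x/w/u values from
 * the sketch above and assuming CR0.WP = 1 with SMEP/SMAP clear.  For a
 * user-mode write (pfec == PFERR_USER_MASK | PFERR_WRITE_MASK, so
 * byte == pfec >> 1 == 3):
 *
 *   wf = ~w = 0x33   (faults for every combination lacking W)
 *   uf = ~u = 0x0f   (faults for every combination lacking U)
 *   ff = smepf = smapf = 0
 *
 * giving permissions[3] == 0x3f: only pte_access values 6 (UW) and 7 (UWX)
 * allow the access.  permission_fault() in mmu.h then roughly tests bit
 * pte_access of permissions[pfec >> 1] (plus a dynamic SMAP adjustment).
 */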
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4712 | /* |
| 4713 | * PKU is an additional mechanism by which the paging controls access to |
| 4714 | * user-mode addresses based on the value in the PKRU register. Protection |
| 4715 | * key violations are reported through a bit in the page fault error code. |
| 4716 | * Unlike other bits of the error code, the PK bit is not known at the |
| 4717 | * call site of e.g. gva_to_gpa; it must be computed directly in |
| 4718 | * permission_fault based on two bits of PKRU, on some machine state (CR4, |
| 4719 | * CR0, EFER, CPL), and on other bits of the error code and the page tables. |
| 4720 | * |
| 4721 | * In particular the following conditions come from the error code, the |
| 4722 | * page tables and the machine state: |
| 4723 | * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1 |
| 4724 | * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch) |
| 4725 | * - PK is always zero if U=0 in the page tables |
| 4726 | * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access. |
| 4727 | * |
| 4728 | * The PKRU bitmask caches the result of these four conditions. The error |
| 4729 | * code (minus the P bit) and the page table's U bit form an index into the |
| 4730 | * PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed |
| 4731 | * with the two bits of the PKRU register corresponding to the protection key. |
| 4732 | * For the first three conditions above the bits will be 00, thus masking |
| 4733 | * away both AD and WD. For all reads or if the last condition holds, WD |
| 4734 | * only will be masked away. |
| 4735 | */ |
| 4736 | static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
| 4737 | bool ept) |
| 4738 | { |
| 4739 | unsigned bit; |
| 4740 | bool wp; |
| 4741 | |
| 4742 | if (ept) { |
| 4743 | mmu->pkru_mask = 0; |
| 4744 | return; |
| 4745 | } |
| 4746 | |
| 4747 | /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */ |
| 4748 | if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) { |
| 4749 | mmu->pkru_mask = 0; |
| 4750 | return; |
| 4751 | } |
| 4752 | |
| 4753 | wp = is_write_protection(vcpu); |
| 4754 | |
| 4755 | for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { |
| 4756 | unsigned pfec, pkey_bits; |
| 4757 | bool check_pkey, check_write, ff, uf, wf, pte_user; |
| 4758 | |
| 4759 | pfec = bit << 1; |
| 4760 | ff = pfec & PFERR_FETCH_MASK; |
| 4761 | uf = pfec & PFERR_USER_MASK; |
| 4762 | wf = pfec & PFERR_WRITE_MASK; |
| 4763 | |
| 4764 | /* PFEC.RSVD is replaced by ACC_USER_MASK. */ |
| 4765 | pte_user = pfec & PFERR_RSVD_MASK; |
| 4766 | |
| 4767 | /* |
| 4768 |  * Only need to check accesses that are not instruction
| 4769 |  * fetches and are to a user page.
| 4770 | */ |
| 4771 | check_pkey = (!ff && pte_user); |
| 4772 | /* |
| 4773 | * write access is controlled by PKRU if it is a |
| 4774 | * user access or CR0.WP = 1. |
| 4775 | */ |
| 4776 | check_write = check_pkey && wf && (uf || wp); |
| 4777 | |
| 4778 | /* PKRU.AD stops both read and write access. */ |
| 4779 | pkey_bits = !!check_pkey; |
| 4780 | /* PKRU.WD stops write access. */ |
| 4781 | pkey_bits |= (!!check_write) << 1; |
| 4782 | |
| 4783 | mmu->pkru_mask |= (pkey_bits & 3) << pfec; |
| 4784 | } |
| 4785 | } |
| 4786 | |
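/*
 * Worked example of one pkru_mask entry: for a supervisor-mode write with
 * CR0.WP = 1 to a user page (wf set, uf clear, ff clear, pte_user set),
 * check_pkey and check_write are both true, so pkey_bits == 0x3 and either
 * PKRU.AD or PKRU.WD of the page's protection key can make the access
 * fault.  For a read of the same page only PKRU.AD matters (pkey_bits ==
 * 0x1), and for an instruction fetch or a supervisor page pkey_bits == 0,
 * matching the "PK is always zero" rules listed above.
 */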
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4787 | static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4788 | { |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4789 | unsigned root_level = mmu->root_level; |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4790 | |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4791 | mmu->last_nonleaf_level = root_level; |
| 4792 | if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu)) |
| 4793 | mmu->last_nonleaf_level++; |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4794 | } |
| 4795 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4796 | static void paging64_init_context_common(struct kvm_vcpu *vcpu, |
| 4797 | struct kvm_mmu *context, |
| 4798 | int level) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4799 | { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4800 | context->nx = is_nx(vcpu); |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4801 | context->root_level = level; |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4802 | |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4803 | reset_rsvds_bits_mask(vcpu, context); |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4804 | update_permission_bitmask(vcpu, context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4805 | update_pkru_bitmask(vcpu, context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4806 | update_last_nonleaf_level(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4807 | |
Paolo Bonzini | fa4a2c0 | 2013-10-02 16:56:16 +0200 | [diff] [blame] | 4808 | MMU_WARN_ON(!is_pae(vcpu)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4809 | context->page_fault = paging64_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4810 | context->gva_to_gpa = paging64_gva_to_gpa; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 4811 | context->sync_page = paging64_sync_page; |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 4812 | context->invlpg = paging64_invlpg; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 4813 | context->update_pte = paging64_update_pte; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 4814 | context->shadow_root_level = level; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 4815 | context->direct_map = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4816 | } |
| 4817 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4818 | static void paging64_init_context(struct kvm_vcpu *vcpu, |
| 4819 | struct kvm_mmu *context) |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 4820 | { |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4821 | int root_level = is_la57_mode(vcpu) ? |
| 4822 | PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; |
| 4823 | |
| 4824 | paging64_init_context_common(vcpu, context, root_level); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 4825 | } |
| 4826 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4827 | static void paging32_init_context(struct kvm_vcpu *vcpu, |
| 4828 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4829 | { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4830 | context->nx = false; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4831 | context->root_level = PT32_ROOT_LEVEL; |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4832 | |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4833 | reset_rsvds_bits_mask(vcpu, context); |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4834 | update_permission_bitmask(vcpu, context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4835 | update_pkru_bitmask(vcpu, context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4836 | update_last_nonleaf_level(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4837 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4838 | context->page_fault = paging32_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4839 | context->gva_to_gpa = paging32_gva_to_gpa; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 4840 | context->sync_page = paging32_sync_page; |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 4841 | context->invlpg = paging32_invlpg; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 4842 | context->update_pte = paging32_update_pte; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4843 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 4844 | context->direct_map = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4845 | } |
| 4846 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4847 | static void paging32E_init_context(struct kvm_vcpu *vcpu, |
| 4848 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4849 | { |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4850 | paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4851 | } |
| 4852 | |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4853 | static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4854 | { |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4855 | union kvm_mmu_extended_role ext = {0}; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4856 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4857 | ext.cr0_pg = !!is_paging(vcpu); |
Vitaly Kuznetsov | 0699c64 | 2019-04-30 19:33:26 +0200 | [diff] [blame] | 4858 | ext.cr4_pae = !!is_pae(vcpu); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4859 | ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); |
| 4860 | ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); |
| 4861 | ext.cr4_pse = !!is_pse(vcpu); |
| 4862 | ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); |
Yu Zhang | de3ccd2 | 2019-02-01 00:09:23 +0800 | [diff] [blame] | 4863 | ext.maxphyaddr = cpuid_maxphyaddr(vcpu); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4864 | |
| 4865 | ext.valid = 1; |
| 4866 | |
| 4867 | return ext; |
| 4868 | } |
| 4869 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4870 | static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, |
| 4871 | bool base_only) |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4872 | { |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4873 | union kvm_mmu_role role = {0}; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4874 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4875 | role.base.access = ACC_ALL; |
| 4876 | role.base.nxe = !!is_nx(vcpu); |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4877 | role.base.cr0_wp = is_write_protection(vcpu); |
| 4878 | role.base.smm = is_smm(vcpu); |
| 4879 | role.base.guest_mode = is_guest_mode(vcpu); |
| 4880 | |
| 4881 | if (base_only) |
| 4882 | return role; |
| 4883 | |
| 4884 | role.ext = kvm_calc_mmu_role_ext(vcpu); |
| 4885 | |
| 4886 | return role; |
| 4887 | } |
| 4888 | |
| 4889 | static union kvm_mmu_role |
| 4890 | kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) |
| 4891 | { |
| 4892 | union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); |
| 4893 | |
| 4894 | role.base.ad_disabled = (shadow_accessed_mask == 0); |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 4895 | role.base.level = kvm_x86_ops.get_tdp_level(vcpu); |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4896 | role.base.direct = true; |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 4897 | role.base.gpte_is_8_bytes = true; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4898 | |
| 4899 | return role; |
| 4900 | } |
| 4901 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4902 | static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4903 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 4904 | struct kvm_mmu *context = vcpu->arch.mmu; |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4905 | union kvm_mmu_role new_role = |
| 4906 | kvm_calc_tdp_mmu_root_page_role(vcpu, false); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4907 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4908 | if (new_role.as_u64 == context->mmu_role.as_u64) |
| 4909 | return; |
| 4910 | |
| 4911 | context->mmu_role.as_u64 = new_role.as_u64; |
Sean Christopherson | 7a02674 | 2020-02-06 14:14:34 -0800 | [diff] [blame] | 4912 | context->page_fault = kvm_tdp_page_fault; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 4913 | context->sync_page = nonpaging_sync_page; |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 4914 | context->invlpg = NULL; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 4915 | context->update_pte = nonpaging_update_pte; |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 4916 | context->shadow_root_level = kvm_x86_ops.get_tdp_level(vcpu); |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 4917 | context->direct_map = true; |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 4918 | context->get_guest_pgd = get_cr3; |
Avi Kivity | e4e517b | 2011-07-28 11:36:17 +0300 | [diff] [blame] | 4919 | context->get_pdptr = kvm_pdptr_read; |
Joerg Roedel | cb659db | 2010-09-10 17:30:43 +0200 | [diff] [blame] | 4920 | context->inject_page_fault = kvm_inject_page_fault; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4921 | |
| 4922 | if (!is_paging(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4923 | context->nx = false; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4924 | context->gva_to_gpa = nonpaging_gva_to_gpa; |
| 4925 | context->root_level = 0; |
| 4926 | } else if (is_long_mode(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4927 | context->nx = is_nx(vcpu); |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4928 | context->root_level = is_la57_mode(vcpu) ? |
| 4929 | PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4930 | reset_rsvds_bits_mask(vcpu, context); |
| 4931 | context->gva_to_gpa = paging64_gva_to_gpa; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4932 | } else if (is_pae(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4933 | context->nx = is_nx(vcpu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4934 | context->root_level = PT32E_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4935 | reset_rsvds_bits_mask(vcpu, context); |
| 4936 | context->gva_to_gpa = paging64_gva_to_gpa; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4937 | } else { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4938 | context->nx = false; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4939 | context->root_level = PT32_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4940 | reset_rsvds_bits_mask(vcpu, context); |
| 4941 | context->gva_to_gpa = paging32_gva_to_gpa; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4942 | } |
| 4943 | |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4944 | update_permission_bitmask(vcpu, context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4945 | update_pkru_bitmask(vcpu, context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4946 | update_last_nonleaf_level(vcpu, context); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4947 | reset_tdp_shadow_zero_bits_mask(vcpu, context); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4948 | } |
| 4949 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4950 | static union kvm_mmu_role |
| 4951 | kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4952 | { |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4953 | union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 4954 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4955 | role.base.smep_andnot_wp = role.ext.cr4_smep && |
| 4956 | !is_write_protection(vcpu); |
| 4957 | role.base.smap_andnot_wp = role.ext.cr4_smap && |
| 4958 | !is_write_protection(vcpu); |
| 4959 | role.base.direct = !is_paging(vcpu); |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 4960 | role.base.gpte_is_8_bytes = !!is_pae(vcpu); |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4961 | |
| 4962 | if (!is_long_mode(vcpu)) |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4963 | role.base.level = PT32E_ROOT_LEVEL; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4964 | else if (is_la57_mode(vcpu)) |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4965 | role.base.level = PT64_ROOT_5LEVEL; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4966 | else |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4967 | role.base.level = PT64_ROOT_4LEVEL; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4968 | |
| 4969 | return role; |
| 4970 | } |
| 4971 | |
| 4972 | void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) |
| 4973 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 4974 | struct kvm_mmu *context = vcpu->arch.mmu; |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4975 | union kvm_mmu_role new_role = |
| 4976 | kvm_calc_shadow_mmu_root_page_role(vcpu, false); |
| 4977 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4978 | if (new_role.as_u64 == context->mmu_role.as_u64) |
| 4979 | return; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4980 | |
| 4981 | if (!is_paging(vcpu)) |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4982 | nonpaging_init_context(vcpu, context); |
Avi Kivity | a9058ec | 2006-12-29 16:49:37 -0800 | [diff] [blame] | 4983 | else if (is_long_mode(vcpu)) |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4984 | paging64_init_context(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4985 | else if (is_pae(vcpu)) |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4986 | paging32E_init_context(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4987 | else |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4988 | paging32_init_context(vcpu, context); |
Avi Kivity | a770f6f | 2008-12-21 19:20:09 +0200 | [diff] [blame] | 4989 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4990 | context->mmu_role.as_u64 = new_role.as_u64; |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4991 | reset_shadow_zero_bits_mask(vcpu, context); |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 4992 | } |
| 4993 | EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); |
| 4994 | |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4995 | static union kvm_mmu_role |
| 4996 | kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 4997 | bool execonly, u8 level) |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4998 | { |
Sean Christopherson | 552c69b1 | 2019-03-07 15:27:43 -0800 | [diff] [blame] | 4999 | union kvm_mmu_role role = {0}; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5000 | |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5001 | /* SMM flag is inherited from root_mmu */ |
| 5002 | role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5003 | |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 5004 | role.base.level = level; |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5005 | role.base.gpte_is_8_bytes = true; |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5006 | role.base.direct = false; |
| 5007 | role.base.ad_disabled = !accessed_dirty; |
| 5008 | role.base.guest_mode = true; |
| 5009 | role.base.access = ACC_ALL; |
| 5010 | |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5011 | /* |
| 5012 | * WP=1 and NOT_WP=1 is an impossible combination, use WP and the |
| 5013 | * SMAP variation to denote shadow EPT entries. |
| 5014 | */ |
| 5015 | role.base.cr0_wp = true; |
| 5016 | role.base.smap_andnot_wp = true; |
| 5017 | |
Sean Christopherson | 552c69b1 | 2019-03-07 15:27:43 -0800 | [diff] [blame] | 5018 | role.ext = kvm_calc_mmu_role_ext(vcpu); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5019 | role.ext.execonly = execonly; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5020 | |
| 5021 | return role; |
| 5022 | } |
| 5023 | |
Paolo Bonzini | ae1e2d1 | 2017-03-30 11:55:30 +0200 | [diff] [blame] | 5024 | void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 5025 | bool accessed_dirty, gpa_t new_eptp) |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5026 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5027 | struct kvm_mmu *context = vcpu->arch.mmu; |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 5028 | u8 level = vmx_eptp_page_walk_level(new_eptp); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5029 | union kvm_mmu_role new_role = |
| 5030 | kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty, |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 5031 | execonly, level); |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 5032 | |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5033 | __kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false); |
| 5034 | |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5035 | if (new_role.as_u64 == context->mmu_role.as_u64) |
| 5036 | return; |
| 5037 | |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 5038 | context->shadow_root_level = level; |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5039 | |
| 5040 | context->nx = true; |
Paolo Bonzini | ae1e2d1 | 2017-03-30 11:55:30 +0200 | [diff] [blame] | 5041 | context->ept_ad = accessed_dirty; |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5042 | context->page_fault = ept_page_fault; |
| 5043 | context->gva_to_gpa = ept_gva_to_gpa; |
| 5044 | context->sync_page = ept_sync_page; |
| 5045 | context->invlpg = ept_invlpg; |
| 5046 | context->update_pte = ept_update_pte; |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 5047 | context->root_level = level; |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5048 | context->direct_map = false; |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5049 | context->mmu_role.as_u64 = new_role.as_u64; |
Vitaly Kuznetsov | 3dc773e | 2018-10-08 21:28:06 +0200 | [diff] [blame] | 5050 | |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5051 | update_permission_bitmask(vcpu, context, true); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 5052 | update_pkru_bitmask(vcpu, context, true); |
Ladi Prosek | fd19d3b4 | 2017-10-05 11:10:22 +0200 | [diff] [blame] | 5053 | update_last_nonleaf_level(vcpu, context); |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5054 | reset_rsvds_bits_mask_ept(vcpu, context, execonly); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 5055 | reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5056 | } |
| 5057 | EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); |
| 5058 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 5059 | static void init_kvm_softmmu(struct kvm_vcpu *vcpu) |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 5060 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5061 | struct kvm_mmu *context = vcpu->arch.mmu; |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 5062 | |
| 5063 | kvm_init_shadow_mmu(vcpu); |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 5064 | context->get_guest_pgd = get_cr3; |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 5065 | context->get_pdptr = kvm_pdptr_read; |
| 5066 | context->inject_page_fault = kvm_inject_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5067 | } |
| 5068 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 5069 | static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5070 | { |
Vitaly Kuznetsov | bf627a9 | 2018-10-08 21:28:13 +0200 | [diff] [blame] | 5071 | union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5072 | struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; |
| 5073 | |
Vitaly Kuznetsov | bf627a9 | 2018-10-08 21:28:13 +0200 | [diff] [blame] | 5074 | if (new_role.as_u64 == g_context->mmu_role.as_u64) |
| 5075 | return; |
| 5076 | |
| 5077 | g_context->mmu_role.as_u64 = new_role.as_u64; |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 5078 | g_context->get_guest_pgd = get_cr3; |
Avi Kivity | e4e517b | 2011-07-28 11:36:17 +0300 | [diff] [blame] | 5079 | g_context->get_pdptr = kvm_pdptr_read; |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5080 | g_context->inject_page_fault = kvm_inject_page_fault; |
| 5081 | |
| 5082 | /* |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5083 | * L2 page tables are never shadowed, so there is no need to sync |
| 5084 | * SPTEs. |
| 5085 | */ |
| 5086 | g_context->invlpg = NULL; |
| 5087 | |
| 5088 | /* |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5089 | * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using |
David Matlack | 0af2593 | 2015-12-30 08:26:17 -0800 | [diff] [blame] | 5090 | * L1's nested page tables (e.g. EPT12). The nested translation |
| 5091 | * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using |
| 5092 | * L2's page tables as the first level of translation and L1's |
| 5093 | * nested page tables as the second level of translation. Basically |
| 5094 | * the gva_to_gpa functions between mmu and nested_mmu are swapped. |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5095 | */ |
| 5096 | if (!is_paging(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 5097 | g_context->nx = false; |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5098 | g_context->root_level = 0; |
| 5099 | g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; |
| 5100 | } else if (is_long_mode(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 5101 | g_context->nx = is_nx(vcpu); |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 5102 | g_context->root_level = is_la57_mode(vcpu) ? |
| 5103 | PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 5104 | reset_rsvds_bits_mask(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5105 | g_context->gva_to_gpa = paging64_gva_to_gpa_nested; |
| 5106 | } else if (is_pae(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 5107 | g_context->nx = is_nx(vcpu); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5108 | g_context->root_level = PT32E_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 5109 | reset_rsvds_bits_mask(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5110 | g_context->gva_to_gpa = paging64_gva_to_gpa_nested; |
| 5111 | } else { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 5112 | g_context->nx = false; |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5113 | g_context->root_level = PT32_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 5114 | reset_rsvds_bits_mask(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5115 | g_context->gva_to_gpa = paging32_gva_to_gpa_nested; |
| 5116 | } |
| 5117 | |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 5118 | update_permission_bitmask(vcpu, g_context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 5119 | update_pkru_bitmask(vcpu, g_context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 5120 | update_last_nonleaf_level(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5121 | } |
| 5122 | |
Junaid Shahid | 1c53da3 | 2018-06-27 14:59:10 -0700 | [diff] [blame] | 5123 | void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots) |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 5124 | { |
Junaid Shahid | 1c53da3 | 2018-06-27 14:59:10 -0700 | [diff] [blame] | 5125 | if (reset_roots) { |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5126 | uint i; |
| 5127 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5128 | vcpu->arch.mmu->root_hpa = INVALID_PAGE; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5129 | |
| 5130 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5131 | vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; |
Junaid Shahid | 1c53da3 | 2018-06-27 14:59:10 -0700 | [diff] [blame] | 5132 | } |
| 5133 | |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5134 | if (mmu_is_nested(vcpu)) |
Paolo Bonzini | e0c6db3 | 2014-12-23 13:39:46 +0100 | [diff] [blame] | 5135 | init_kvm_nested_mmu(vcpu); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5136 | else if (tdp_enabled) |
Paolo Bonzini | e0c6db3 | 2014-12-23 13:39:46 +0100 | [diff] [blame] | 5137 | init_kvm_tdp_mmu(vcpu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 5138 | else |
Paolo Bonzini | e0c6db3 | 2014-12-23 13:39:46 +0100 | [diff] [blame] | 5139 | init_kvm_softmmu(vcpu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 5140 | } |
Junaid Shahid | 1c53da3 | 2018-06-27 14:59:10 -0700 | [diff] [blame] | 5141 | EXPORT_SYMBOL_GPL(kvm_init_mmu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 5142 | |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5143 | static union kvm_mmu_page_role |
| 5144 | kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu) |
| 5145 | { |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 5146 | union kvm_mmu_role role; |
| 5147 | |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5148 | if (tdp_enabled) |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 5149 | role = kvm_calc_tdp_mmu_root_page_role(vcpu, true); |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5150 | else |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 5151 | role = kvm_calc_shadow_mmu_root_page_role(vcpu, true); |
| 5152 | |
| 5153 | return role.base; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5154 | } |
Dong, Eddie | 489f1d6 | 2008-01-07 11:14:20 +0200 | [diff] [blame] | 5155 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 5156 | void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 5157 | { |
Paolo Bonzini | 95f93af | 2013-10-02 16:56:12 +0200 | [diff] [blame] | 5158 | kvm_mmu_unload(vcpu); |
Junaid Shahid | 1c53da3 | 2018-06-27 14:59:10 -0700 | [diff] [blame] | 5159 | kvm_init_mmu(vcpu, true); |
Eddie Dong | 8668a3c | 2007-10-10 14:26:45 +0800 | [diff] [blame] | 5160 | } |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 5161 | EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); |
| 5162 | |
| 5163 | int kvm_mmu_load(struct kvm_vcpu *vcpu) |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 5164 | { |
| 5165 | int r; |
Avi Kivity | e2dec93 | 2007-01-05 16:36:54 -0800 | [diff] [blame] | 5166 | |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 5167 | r = mmu_topup_memory_caches(vcpu); |
| 5168 | if (r) |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 5169 | goto out; |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 5170 | r = mmu_alloc_roots(vcpu); |
Takuya Yoshikawa | e2858b4 | 2013-05-09 15:45:12 +0900 | [diff] [blame] | 5171 | kvm_mmu_sync_roots(vcpu); |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 5172 | if (r) |
| 5173 | goto out; |
Paolo Bonzini | 727a7e2 | 2020-03-05 03:52:50 -0500 | [diff] [blame] | 5174 | kvm_mmu_load_pgd(vcpu); |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 5175 | kvm_x86_ops.tlb_flush_current(vcpu); |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 5176 | out: |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5177 | return r; |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 5178 | } |
| 5179 | EXPORT_SYMBOL_GPL(kvm_mmu_load); |
| 5180 | |
| 5181 | void kvm_mmu_unload(struct kvm_vcpu *vcpu) |
| 5182 | { |
Vitaly Kuznetsov | 14c07ad | 2018-10-08 21:28:08 +0200 | [diff] [blame] | 5183 | kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); |
| 5184 | WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa)); |
| 5185 | kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); |
| 5186 | WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5187 | } |
Joerg Roedel | 4b16184 | 2010-09-10 17:31:03 +0200 | [diff] [blame] | 5188 | EXPORT_SYMBOL_GPL(kvm_mmu_unload); |
Avi Kivity | 09072da | 2007-05-01 14:16:52 +0300 | [diff] [blame] | 5189 | |
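 | | /*
 | |  * Propagate a written guest PTE into the corresponding spte. Non-leaf
 | |  * sptes are not rebuilt here (the caller has already zapped them); only
 | |  * the mmu_pde_zapped statistic is bumped for those.
 | |  */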
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 5190 | static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, |
Xiao Guangrong | 7c56252 | 2011-03-28 10:29:27 +0800 | [diff] [blame] | 5191 | struct kvm_mmu_page *sp, u64 *spte, |
| 5192 | const void *new) |
Avi Kivity | 0028425 | 2007-05-01 16:53:31 +0300 | [diff] [blame] | 5193 | { |
Marcelo Tosatti | 3094538 | 2008-06-11 20:32:40 -0300 | [diff] [blame] | 5194 | if (sp->role.level != PT_PAGE_TABLE_LEVEL) { |
Joerg Roedel | 7e4e405 | 2009-07-27 16:30:46 +0200 | [diff] [blame] | 5195 | ++vcpu->kvm->stat.mmu_pde_zapped; |
| 5196 | return; |
Marcelo Tosatti | 3094538 | 2008-06-11 20:32:40 -0300 | [diff] [blame] | 5197 | } |
Avi Kivity | 0028425 | 2007-05-01 16:53:31 +0300 | [diff] [blame] | 5198 | |
Avi Kivity | 4cee576 | 2007-11-18 16:37:07 +0200 | [diff] [blame] | 5199 | ++vcpu->kvm->stat.mmu_pte_updated; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5200 | vcpu->arch.mmu->update_pte(vcpu, sp, spte, new); |
Avi Kivity | 0028425 | 2007-05-01 16:53:31 +0300 | [diff] [blame] | 5201 | } |
| 5202 | |
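 | | /*
 | |  * Returns true if replacing 'old' with 'new' requires other vCPUs' TLBs to
 | |  * be flushed, i.e. if the change removes the mapping or revokes a permission
 | |  * that a remote vCPU may still have cached.
 | |  */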
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 5203 | static bool need_remote_flush(u64 old, u64 new) |
| 5204 | { |
| 5205 | if (!is_shadow_present_pte(old)) |
| 5206 | return false; |
| 5207 | if (!is_shadow_present_pte(new)) |
| 5208 | return true; |
| 5209 | if ((old ^ new) & PT64_BASE_ADDR_MASK) |
| 5210 | return true; |
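 | | 	/*
 | | 	 * Flip the NX bit in both values so that, like the other bits in
 | | 	 * PT64_PERM_MASK, a set bit means the access is allowed; the check
 | | 	 * below then catches any permission present in 'old' but not 'new'.
 | | 	 */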
Gleb Natapov | 5316622 | 2013-08-05 11:07:14 +0300 | [diff] [blame] | 5211 | old ^= shadow_nx_mask; |
| 5212 | new ^= shadow_nx_mask; |
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 5213 | return (old & ~new & PT64_PERM_MASK) != 0; |
| 5214 | } |
| 5215 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5216 | static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 5217 | int *bytes) |
Avi Kivity | da4a00f | 2007-01-05 16:36:44 -0800 | [diff] [blame] | 5218 | { |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 5219 | u64 gentry = 0; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5220 | int r; |
Avi Kivity | 72016f3 | 2010-03-15 13:59:53 +0200 | [diff] [blame] | 5221 | |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 5222 | /* |
 | 5223 | 	 * Assume that the pte write is on a page table of the same type
Xiao Guangrong | 49b26e2 | 2011-03-04 19:00:00 +0800 | [diff] [blame] | 5224 | 	 * as the current vcpu's paging mode, since we update the sptes only
| 5225 | * when they have the same mode. |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 5226 | */ |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5227 | if (is_pae(vcpu) && *bytes == 4) { |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 5228 | /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5229 | *gpa &= ~(gpa_t)7; |
| 5230 | *bytes = 8; |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 5231 | } |
| 5232 | |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 5233 | if (*bytes == 4 || *bytes == 8) { |
| 5234 | r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes); |
| 5235 | if (r) |
| 5236 | gentry = 0; |
Avi Kivity | 72016f3 | 2010-03-15 13:59:53 +0200 | [diff] [blame] | 5237 | } |
| 5238 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5239 | return gentry; |
| 5240 | } |
| 5241 | |
| 5242 | /* |
| 5243 | * If we're seeing too many writes to a page, it may no longer be a page table, |
| 5244 | * or we may be forking, in which case it is better to unmap the page. |
| 5245 | */ |
Xiao Guangrong | a138fe7 | 2011-12-16 18:18:10 +0800 | [diff] [blame] | 5246 | static bool detect_write_flooding(struct kvm_mmu_page *sp) |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5247 | { |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 5248 | /* |
 | 5249 | 	 * Skip write-flooding detection for an sp whose level is 1, because such
 | 5250 | 	 * an sp can become unsync and its guest page is then not write-protected.
| 5251 | */ |
Davidlohr Bueso | f71fa31 | 2012-04-18 12:24:29 +0200 | [diff] [blame] | 5252 | if (sp->role.level == PT_PAGE_TABLE_LEVEL) |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 5253 | return false; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5254 | |
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 5255 | atomic_inc(&sp->write_flooding_count); |
| 5256 | return atomic_read(&sp->write_flooding_count) >= 3; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5257 | } |
| 5258 | |
| 5259 | /* |
| 5260 | * Misaligned accesses are too much trouble to fix up; also, they usually |
| 5261 | * indicate a page is not used as a page table. |
| 5262 | */ |
| 5263 | static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, |
| 5264 | int bytes) |
| 5265 | { |
| 5266 | unsigned offset, pte_size, misaligned; |
| 5267 | |
| 5268 | pgprintk("misaligned: gpa %llx bytes %d role %x\n", |
| 5269 | gpa, bytes, sp->role.word); |
| 5270 | |
| 5271 | offset = offset_in_page(gpa); |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5272 | pte_size = sp->role.gpte_is_8_bytes ? 8 : 4; |
Xiao Guangrong | 5d9ca30 | 2011-09-22 16:57:55 +0800 | [diff] [blame] | 5273 | |
| 5274 | /* |
 | 5275 | 	 * Sometimes the OS only writes the last byte to update status
 | 5276 | 	 * bits; for example, Linux uses the andb instruction in clear_bit().
| 5277 | */ |
| 5278 | if (!(offset & (pte_size - 1)) && bytes == 1) |
| 5279 | return false; |
| 5280 | |
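 | | 	/*
 | | 	 * A write is considered misaligned if it crosses a gpte boundary or is
 | | 	 * smaller than a gpte. E.g. with 8-byte gptes, a 4-byte write at
 | | 	 * offset 6 spans two gptes ((6 ^ 9) & ~7 != 0), and a 2-byte write at
 | | 	 * offset 0 is flagged because bytes < 4.
 | | 	 */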
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5281 | misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); |
| 5282 | misaligned |= bytes < 4; |
| 5283 | |
| 5284 | return misaligned; |
| 5285 | } |
| 5286 | |
| 5287 | static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) |
| 5288 | { |
| 5289 | unsigned page_offset, quadrant; |
| 5290 | u64 *spte; |
| 5291 | int level; |
| 5292 | |
| 5293 | page_offset = offset_in_page(gpa); |
| 5294 | level = sp->role.level; |
| 5295 | *nspte = 1; |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5296 | if (!sp->role.gpte_is_8_bytes) { |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5297 | page_offset <<= 1; /* 32->64 */ |
| 5298 | /* |
| 5299 | * A 32-bit pde maps 4MB while the shadow pdes map |
| 5300 | * only 2MB. So we need to double the offset again |
| 5301 | * and zap two pdes instead of one. |
| 5302 | */ |
| 5303 | if (level == PT32_ROOT_LEVEL) { |
| 5304 | page_offset &= ~7; /* kill rounding error */ |
| 5305 | page_offset <<= 1; |
| 5306 | *nspte = 2; |
| 5307 | } |
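 | | 		/*
 | | 		 * With 4-byte gptes, one guest page worth of gptes expands to
 | | 		 * two shadow pages; role.quadrant identifies which half this sp
 | | 		 * covers, so bail out if the write targets the other half.
 | | 		 */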
| 5308 | quadrant = page_offset >> PAGE_SHIFT; |
| 5309 | page_offset &= ~PAGE_MASK; |
| 5310 | if (quadrant != sp->role.quadrant) |
| 5311 | return NULL; |
| 5312 | } |
| 5313 | |
| 5314 | spte = &sp->spt[page_offset / sizeof(*spte)]; |
| 5315 | return spte; |
| 5316 | } |
| 5317 | |
Sean Christopherson | a102a67 | 2020-03-02 18:02:34 -0800 | [diff] [blame] | 5318 | /* |
| 5319 | * Ignore various flags when determining if a SPTE can be immediately |
| 5320 | * overwritten for the current MMU. |
| 5321 | * - level: explicitly checked in mmu_pte_write_new_pte(), and will never |
| 5322 | * match the current MMU role, as MMU's level tracks the root level. |
| 5323 | * - access: updated based on the new guest PTE |
| 5324 | * - quadrant: handled by get_written_sptes() |
| 5325 | * - invalid: always false (loop only walks valid shadow pages) |
| 5326 | */ |
| 5327 | static const union kvm_mmu_page_role role_ign = { |
| 5328 | .level = 0xf, |
| 5329 | .access = 0x7, |
| 5330 | .quadrant = 0x3, |
| 5331 | .invalid = 0x1, |
| 5332 | }; |
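 | | /*
 | |  * role_ign is consumed in kvm_mmu_pte_write() below: a new guest PTE is
 | |  * propagated into an spte only if the shadow page's role matches the vCPU's
 | |  * current MMU role in every field that is not ignored above.
 | |  */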
| 5333 | |
Xiao Guangrong | 13d268c | 2016-02-24 17:51:16 +0800 | [diff] [blame] | 5334 | static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, |
Jike Song | d126363 | 2016-10-25 15:50:42 +0800 | [diff] [blame] | 5335 | const u8 *new, int bytes, |
| 5336 | struct kvm_page_track_notifier_node *node) |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5337 | { |
| 5338 | gfn_t gfn = gpa >> PAGE_SHIFT; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5339 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5340 | LIST_HEAD(invalid_list); |
| 5341 | u64 entry, gentry, *spte; |
| 5342 | int npte; |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5343 | bool remote_flush, local_flush; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5344 | |
| 5345 | /* |
| 5346 | * If we don't have indirect shadow pages, it means no page is |
 | 5347 | 	 * write-protected, so we can simply exit.
| 5348 | */ |
Mark Rutland | 6aa7de0 | 2017-10-23 14:07:29 -0700 | [diff] [blame] | 5349 | if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5350 | return; |
| 5351 | |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5352 | remote_flush = local_flush = false; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5353 | |
| 5354 | pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); |
| 5355 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5356 | /* |
 | 5357 | 	 * No need to care whether the memory allocation succeeded,
 | 5358 | 	 * since pte prefetch is skipped if there are not enough
 | 5359 | 	 * objects in the cache.
| 5360 | */ |
| 5361 | mmu_topup_memory_caches(vcpu); |
| 5362 | |
| 5363 | spin_lock(&vcpu->kvm->mmu_lock); |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 5364 | |
| 5365 | gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); |
| 5366 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5367 | ++vcpu->kvm->stat.mmu_pte_write; |
Xiao Guangrong | 0375f7f | 2011-11-28 20:41:00 +0800 | [diff] [blame] | 5368 | kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5369 | |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 5370 | for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 5371 | if (detect_write_misaligned(sp, gpa, bytes) || |
Xiao Guangrong | a138fe7 | 2011-12-16 18:18:10 +0800 | [diff] [blame] | 5372 | detect_write_flooding(sp)) { |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5373 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); |
Avi Kivity | 4cee576 | 2007-11-18 16:37:07 +0200 | [diff] [blame] | 5374 | ++vcpu->kvm->stat.mmu_flooded; |
Avi Kivity | 0e7bc4b | 2007-01-05 16:36:48 -0800 | [diff] [blame] | 5375 | continue; |
| 5376 | } |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5377 | |
| 5378 | spte = get_written_sptes(sp, gpa, &npte); |
| 5379 | if (!spte) |
| 5380 | continue; |
| 5381 | |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 5382 | local_flush = true; |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 5383 | while (npte--) { |
Vitaly Kuznetsov | 36d9594d | 2018-10-08 21:28:10 +0200 | [diff] [blame] | 5384 | u32 base_role = vcpu->arch.mmu->mmu_role.base.word; |
| 5385 | |
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 5386 | entry = *spte; |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 5387 | mmu_page_zap_pte(vcpu->kvm, sp, spte); |
Xiao Guangrong | fa1de2b | 2010-07-16 11:19:51 +0800 | [diff] [blame] | 5388 | if (gentry && |
Sean Christopherson | a102a67 | 2020-03-02 18:02:34 -0800 | [diff] [blame] | 5389 | !((sp->role.word ^ base_role) & ~role_ign.word) && |
| 5390 | rmap_can_add(vcpu)) |
Xiao Guangrong | 7c56252 | 2011-03-28 10:29:27 +0800 | [diff] [blame] | 5391 | mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); |
Gleb Natapov | 9bb4f6b | 2013-01-30 16:45:01 +0200 | [diff] [blame] | 5392 | if (need_remote_flush(entry, *spte)) |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 5393 | remote_flush = true; |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 5394 | ++spte; |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5395 | } |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5396 | } |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5397 | kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); |
Xiao Guangrong | 0375f7f | 2011-11-28 20:41:00 +0800 | [diff] [blame] | 5398 | kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 5399 | spin_unlock(&vcpu->kvm->mmu_lock); |
Avi Kivity | da4a00f | 2007-01-05 16:36:44 -0800 | [diff] [blame] | 5400 | } |
| 5401 | |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5402 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) |
| 5403 | { |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 5404 | gpa_t gpa; |
| 5405 | int r; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5406 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5407 | if (vcpu->arch.mmu->direct_map) |
Avi Kivity | 60f2478 | 2009-08-27 13:37:06 +0300 | [diff] [blame] | 5408 | return 0; |
| 5409 | |
Gleb Natapov | 1871c60 | 2010-02-10 14:21:32 +0200 | [diff] [blame] | 5410 | gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 5411 | |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 5412 | r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 5413 | |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 5414 | return r; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5415 | } |
Avi Kivity | 577bdc4 | 2008-07-19 08:57:05 +0300 | [diff] [blame] | 5416 | EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5417 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5418 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, |
Andre Przywara | dc25e89 | 2010-12-21 11:12:07 +0100 | [diff] [blame] | 5419 | void *insn, int insn_len) |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5420 | { |
Sean Christopherson | 92daa48 | 2020-02-18 15:03:08 -0800 | [diff] [blame] | 5421 | int r, emulation_type = EMULTYPE_PF; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5422 | bool direct = vcpu->arch.mmu->direct_map; |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5423 | |
Sean Christopherson | 6948199 | 2019-12-06 15:57:29 -0800 | [diff] [blame] | 5424 | if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa))) |
Sean Christopherson | ddce620 | 2019-12-06 15:57:27 -0800 | [diff] [blame] | 5425 | return RET_PF_RETRY; |
| 5426 | |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 5427 | r = RET_PF_INVALID; |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5428 | if (unlikely(error_code & PFERR_RSVD_MASK)) { |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5429 | r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct); |
Sean Christopherson | 472faff | 2018-08-23 13:56:50 -0700 | [diff] [blame] | 5430 | if (r == RET_PF_EMULATE) |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5431 | goto emulate; |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5432 | } |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5433 | |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 5434 | if (r == RET_PF_INVALID) { |
Sean Christopherson | 7a02674 | 2020-02-06 14:14:34 -0800 | [diff] [blame] | 5435 | r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, |
| 5436 | lower_32_bits(error_code), false); |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 5437 | WARN_ON(r == RET_PF_INVALID); |
| 5438 | } |
| 5439 | |
| 5440 | if (r == RET_PF_RETRY) |
| 5441 | return 1; |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5442 | if (r < 0) |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5443 | return r; |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5444 | |
Tom Lendacky | 1472775 | 2016-11-23 12:01:38 -0500 | [diff] [blame] | 5445 | /* |
| 5446 | * Before emulating the instruction, check if the error code |
| 5447 | * was due to a RO violation while translating the guest page. |
| 5448 | * This can occur when using nested virtualization with nested |
| 5449 | * paging in both guests. If true, we simply unprotect the page |
| 5450 | * and resume the guest. |
Tom Lendacky | 1472775 | 2016-11-23 12:01:38 -0500 | [diff] [blame] | 5451 | */ |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5452 | if (vcpu->arch.mmu->direct_map && |
Paolo Bonzini | eebed24 | 2016-11-28 14:39:58 +0100 | [diff] [blame] | 5453 | (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) { |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5454 | kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)); |
Tom Lendacky | 1472775 | 2016-11-23 12:01:38 -0500 | [diff] [blame] | 5455 | return 1; |
| 5456 | } |
| 5457 | |
Sean Christopherson | 472faff | 2018-08-23 13:56:50 -0700 | [diff] [blame] | 5458 | /* |
 | 5459 | 	 * vcpu->arch.mmu->page_fault returned RET_PF_EMULATE, but we can still
| 5460 | * optimistically try to just unprotect the page and let the processor |
| 5461 | * re-execute the instruction that caused the page fault. Do not allow |
| 5462 | * retrying MMIO emulation, as it's not only pointless but could also |
| 5463 | * cause us to enter an infinite loop because the processor will keep |
Sean Christopherson | 6c3dfeb | 2018-08-23 13:56:51 -0700 | [diff] [blame] | 5464 | * faulting on the non-existent MMIO address. Retrying an instruction |
| 5465 | * from a nested guest is also pointless and dangerous as we are only |
| 5466 | * explicitly shadowing L1's page tables, i.e. unprotecting something |
 | 5467 | 	 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
Sean Christopherson | 472faff | 2018-08-23 13:56:50 -0700 | [diff] [blame] | 5468 | */ |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5469 | if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu)) |
Sean Christopherson | 92daa48 | 2020-02-18 15:03:08 -0800 | [diff] [blame] | 5470 | emulation_type |= EMULTYPE_ALLOW_RETRY_PF; |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5471 | emulate: |
Brijesh Singh | 00b10fe | 2017-12-04 10:57:40 -0600 | [diff] [blame] | 5472 | /* |
| 5473 | * On AMD platforms, under certain conditions insn_len may be zero on #NPF. |
 | 5474 | 	 * This can happen if a guest gets a page fault on a data access but the HW
 | 5475 | 	 * table walker is not able to read the instruction page (e.g. the instruction
| 5476 | * page is not present in memory). In those cases we simply restart the |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 5477 | * guest, with the exception of AMD Erratum 1096 which is unrecoverable. |
Brijesh Singh | 00b10fe | 2017-12-04 10:57:40 -0600 | [diff] [blame] | 5478 | */ |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 5479 | if (unlikely(insn && !insn_len)) { |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 5480 | if (!kvm_x86_ops.need_emulation_on_page_fault(vcpu)) |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 5481 | return 1; |
| 5482 | } |
Brijesh Singh | 00b10fe | 2017-12-04 10:57:40 -0600 | [diff] [blame] | 5483 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5484 | return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn, |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 5485 | insn_len); |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5486 | } |
| 5487 | EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); |
| 5488 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5489 | void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
| 5490 | gva_t gva, hpa_t root_hpa) |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 5491 | { |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5492 | int i; |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 5493 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5494 | /* It's actually a GPA for vcpu->arch.guest_mmu. */ |
| 5495 | if (mmu != &vcpu->arch.guest_mmu) { |
| 5496 | /* INVLPG on a non-canonical address is a NOP according to the SDM. */ |
| 5497 | if (is_noncanonical_address(gva, vcpu)) |
| 5498 | return; |
| 5499 | |
| 5500 | kvm_x86_ops.tlb_flush_gva(vcpu, gva); |
| 5501 | } |
| 5502 | |
| 5503 | if (!mmu->invlpg) |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5504 | return; |
| 5505 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5506 | if (root_hpa == INVALID_PAGE) { |
| 5507 | mmu->invlpg(vcpu, gva, mmu->root_hpa); |
Junaid Shahid | 956bf35 | 2018-06-27 14:59:18 -0700 | [diff] [blame] | 5508 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5509 | /* |
| 5510 | * INVLPG is required to invalidate any global mappings for the VA, |
 | 5511 | 		 * irrespective of PCID. Since it would take roughly the same amount
 | 5512 | 		 * of work to determine whether any of the prev_root mappings of the VA
 | 5513 | 		 * is marked global as it would to just sync it blindly, we might as
 | 5514 | 		 * well always sync it.
| 5515 | * |
| 5516 | * Mappings not reachable via the current cr3 or the prev_roots will be |
| 5517 | * synced when switching to that cr3, so nothing needs to be done here |
| 5518 | * for them. |
| 5519 | */ |
| 5520 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 5521 | if (VALID_PAGE(mmu->prev_roots[i].hpa)) |
| 5522 | mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); |
| 5523 | } else { |
| 5524 | mmu->invlpg(vcpu, gva, root_hpa); |
| 5525 | } |
| 5526 | } |
| 5527 | EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva); |
Junaid Shahid | 956bf35 | 2018-06-27 14:59:18 -0700 | [diff] [blame] | 5528 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5529 | void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) |
| 5530 | { |
| 5531 | kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE); |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 5532 | ++vcpu->stat.invlpg; |
| 5533 | } |
| 5534 | EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); |
| 5535 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5536 | |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5537 | void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) |
| 5538 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5539 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5540 | bool tlb_flush = false; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5541 | uint i; |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5542 | |
| 5543 | if (pcid == kvm_get_active_pcid(vcpu)) { |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 5544 | mmu->invlpg(vcpu, gva, mmu->root_hpa); |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5545 | tlb_flush = true; |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5546 | } |
| 5547 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5548 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { |
| 5549 | if (VALID_PAGE(mmu->prev_roots[i].hpa) && |
| 5550 | pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) { |
| 5551 | mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); |
| 5552 | tlb_flush = true; |
| 5553 | } |
Junaid Shahid | 956bf35 | 2018-06-27 14:59:18 -0700 | [diff] [blame] | 5554 | } |
Junaid Shahid | ade61e2 | 2018-06-27 14:59:15 -0700 | [diff] [blame] | 5555 | |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5556 | if (tlb_flush) |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 5557 | kvm_x86_ops.tlb_flush_gva(vcpu, gva); |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5558 | |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5559 | ++vcpu->stat.invlpg; |
| 5560 | |
| 5561 | /* |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5562 | * Mappings not reachable via the current cr3 or the prev_roots will be |
| 5563 | * synced when switching to that cr3, so nothing needs to be done here |
| 5564 | * for them. |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5565 | */ |
| 5566 | } |
| 5567 | EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva); |
| 5568 | |
Sean Christopherson | 703c335 | 2020-03-02 15:57:03 -0800 | [diff] [blame] | 5569 | void kvm_configure_mmu(bool enable_tdp, int tdp_page_level) |
Joerg Roedel | 1855267 | 2008-02-07 13:47:41 +0100 | [diff] [blame] | 5570 | { |
Sean Christopherson | bde7723 | 2020-03-02 15:57:02 -0800 | [diff] [blame] | 5571 | tdp_enabled = enable_tdp; |
Sean Christopherson | 703c335 | 2020-03-02 15:57:03 -0800 | [diff] [blame] | 5572 | |
| 5573 | /* |
| 5574 | * max_page_level reflects the capabilities of KVM's MMU irrespective |
| 5575 | * of kernel support, e.g. KVM may be capable of using 1GB pages when |
| 5576 | * the kernel is not. But, KVM never creates a page size greater than |
| 5577 | * what is used by the kernel for any given HVA, i.e. the kernel's |
| 5578 | * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust(). |
| 5579 | */ |
| 5580 | if (tdp_enabled) |
| 5581 | max_page_level = tdp_page_level; |
| 5582 | else if (boot_cpu_has(X86_FEATURE_GBPAGES)) |
| 5583 | max_page_level = PT_PDPE_LEVEL; |
| 5584 | else |
| 5585 | max_page_level = PT_DIRECTORY_LEVEL; |
Joerg Roedel | 1855267 | 2008-02-07 13:47:41 +0100 | [diff] [blame] | 5586 | } |
Sean Christopherson | bde7723 | 2020-03-02 15:57:02 -0800 | [diff] [blame] | 5587 | EXPORT_SYMBOL_GPL(kvm_configure_mmu); |
Xiao Guangrong | 13d268c | 2016-02-24 17:51:16 +0800 | [diff] [blame] | 5588 | |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5589 | /* The return value indicates whether a tlb flush on all vcpus is needed. */
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5590 | typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5591 | |
| 5592 | /* The caller should hold mmu-lock before calling this function. */ |
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5593 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5594 | slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5595 | slot_level_handler fn, int start_level, int end_level, |
| 5596 | gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) |
| 5597 | { |
| 5598 | struct slot_rmap_walk_iterator iterator; |
| 5599 | bool flush = false; |
| 5600 | |
| 5601 | for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn, |
| 5602 | end_gfn, &iterator) { |
| 5603 | if (iterator.rmap) |
| 5604 | flush |= fn(kvm, iterator.rmap); |
| 5605 | |
| 5606 | if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { |
| 5607 | if (flush && lock_flush_tlb) { |
Ben Gardon | f285c63 | 2019-03-12 11:45:59 -0700 | [diff] [blame] | 5608 | kvm_flush_remote_tlbs_with_address(kvm, |
| 5609 | start_gfn, |
| 5610 | iterator.gfn - start_gfn + 1); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5611 | flush = false; |
| 5612 | } |
| 5613 | cond_resched_lock(&kvm->mmu_lock); |
| 5614 | } |
| 5615 | } |
| 5616 | |
| 5617 | if (flush && lock_flush_tlb) { |
Ben Gardon | f285c63 | 2019-03-12 11:45:59 -0700 | [diff] [blame] | 5618 | kvm_flush_remote_tlbs_with_address(kvm, start_gfn, |
| 5619 | end_gfn - start_gfn + 1); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5620 | flush = false; |
| 5621 | } |
| 5622 | |
| 5623 | return flush; |
| 5624 | } |
| 5625 | |
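 | | /*
 | |  * Convenience wrappers around slot_handle_level_range() that cover an entire
 | |  * memslot: all page-table levels, only the large (hugepage) levels, or only
 | |  * the leaf (4K) level.
 | |  */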
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5626 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5627 | slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5628 | slot_level_handler fn, int start_level, int end_level, |
| 5629 | bool lock_flush_tlb) |
| 5630 | { |
| 5631 | return slot_handle_level_range(kvm, memslot, fn, start_level, |
| 5632 | end_level, memslot->base_gfn, |
| 5633 | memslot->base_gfn + memslot->npages - 1, |
| 5634 | lock_flush_tlb); |
| 5635 | } |
| 5636 | |
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5637 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5638 | slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5639 | slot_level_handler fn, bool lock_flush_tlb) |
| 5640 | { |
| 5641 | return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL, |
| 5642 | PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); |
| 5643 | } |
| 5644 | |
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5645 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5646 | slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5647 | slot_level_handler fn, bool lock_flush_tlb) |
| 5648 | { |
| 5649 | return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1, |
| 5650 | PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); |
| 5651 | } |
| 5652 | |
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5653 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5654 | slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5655 | slot_level_handler fn, bool lock_flush_tlb) |
| 5656 | { |
| 5657 | return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL, |
| 5658 | PT_PAGE_TABLE_LEVEL, lock_flush_tlb); |
| 5659 | } |
| 5660 | |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5661 | static void free_mmu_pages(struct kvm_mmu *mmu) |
Takuya Yoshikawa | b99db1d | 2013-01-08 19:44:48 +0900 | [diff] [blame] | 5662 | { |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5663 | free_page((unsigned long)mmu->pae_root); |
| 5664 | free_page((unsigned long)mmu->lm_root); |
Takuya Yoshikawa | 6b81b05 | 2013-01-08 19:47:33 +0900 | [diff] [blame] | 5665 | } |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5666 | |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5667 | static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) |
Avi Kivity | 8234b22 | 2010-12-27 12:08:45 +0200 | [diff] [blame] | 5668 | { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5669 | struct page *page; |
Takuya Yoshikawa | b99db1d | 2013-01-08 19:44:48 +0900 | [diff] [blame] | 5670 | int i; |
Takuya Yoshikawa | 9d1beef | 2013-01-08 19:46:48 +0900 | [diff] [blame] | 5671 | |
Sean Christopherson | b6b80c7 | 2019-06-13 10:22:23 -0700 | [diff] [blame] | 5672 | /* |
| 5673 | * When using PAE paging, the four PDPTEs are treated as 'root' pages, |
| 5674 | * while the PDP table is a per-vCPU construct that's allocated at MMU |
| 5675 | * creation. When emulating 32-bit mode, cr3 is only 32 bits even on |
| 5676 | * x86_64. Therefore we need to allocate the PDP table in the first |
| 5677 | * 4GB of memory, which happens to fit the DMA32 zone. Except for |
| 5678 | * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can |
| 5679 | * skip allocating the PDP table. |
| 5680 | */ |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 5681 | if (tdp_enabled && kvm_x86_ops.get_tdp_level(vcpu) > PT32E_ROOT_LEVEL) |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5682 | return 0; |
| 5683 | |
Ben Gardon | 254272c | 2019-02-11 11:02:50 -0800 | [diff] [blame] | 5684 | page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32); |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5685 | if (!page) |
| 5686 | return -ENOMEM; |
| 5687 | |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5688 | mmu->pae_root = page_address(page); |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5689 | for (i = 0; i < 4; ++i) |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5690 | mmu->pae_root[i] = INVALID_PAGE; |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5691 | |
| 5692 | return 0; |
| 5693 | } |
| 5694 | |
Kai Huang | d91ffee | 2015-01-12 15:28:54 +0800 | [diff] [blame] | 5695 | int kvm_mmu_create(struct kvm_vcpu *vcpu) |
| 5696 | { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5697 | uint i; |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5698 | int ret; |
Avi Kivity | 37a7d8b | 2007-01-05 16:36:56 -0800 | [diff] [blame] | 5699 | |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5700 | vcpu->arch.mmu = &vcpu->arch.root_mmu; |
| 5701 | vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; |
| 5702 | |
| 5703 | vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; |
Linus Torvalds | 636deed | 2019-03-15 15:00:28 -0700 | [diff] [blame] | 5704 | vcpu->arch.root_mmu.root_cr3 = 0; |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5705 | vcpu->arch.root_mmu.translate_gpa = translate_gpa; |
| 5706 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 5707 | vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; |
| 5708 | |
| 5709 | vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; |
Linus Torvalds | 636deed | 2019-03-15 15:00:28 -0700 | [diff] [blame] | 5710 | vcpu->arch.guest_mmu.root_cr3 = 0; |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 5711 | vcpu->arch.guest_mmu.translate_gpa = translate_gpa; |
| 5712 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5713 | vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; |
| 5714 | |
| 5715 | vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5716 | |
| 5717 | ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu); |
| 5718 | if (ret) |
| 5719 | return ret; |
| 5720 | |
| 5721 | ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu); |
| 5722 | if (ret) |
| 5723 | goto fail_allocate_root; |
| 5724 | |
| 5725 | return ret; |
| 5726 | fail_allocate_root: |
| 5727 | free_mmu_pages(&vcpu->arch.guest_mmu); |
| 5728 | return ret; |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5729 | } |
| 5730 | |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5731 | #define BATCH_ZAP_PAGES 10 |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5732 | static void kvm_zap_obsolete_pages(struct kvm *kvm) |
| 5733 | { |
| 5734 | struct kvm_mmu_page *sp, *node; |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5735 | int nr_zapped, batch = 0; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5736 | |
| 5737 | restart: |
| 5738 | list_for_each_entry_safe_reverse(sp, node, |
| 5739 | &kvm->arch.active_mmu_pages, link) { |
| 5740 | /* |
| 5741 | * No obsolete valid page exists before a newly created page |
| 5742 | * since active_mmu_pages is a FIFO list. |
| 5743 | */ |
| 5744 | if (!is_obsolete_sp(kvm, sp)) |
| 5745 | break; |
| 5746 | |
| 5747 | /* |
Sean Christopherson | 9a5c034 | 2019-09-12 19:46:12 -0700 | [diff] [blame] | 5748 | 		 * Skip invalid pages with a non-zero root count; zapping pages
| 5749 | * with a non-zero root count will never succeed, i.e. the page |
| 5750 | * will get thrown back on active_mmu_pages and we'll get stuck |
| 5751 | * in an infinite loop. |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5752 | */ |
Sean Christopherson | 9a5c034 | 2019-09-12 19:46:12 -0700 | [diff] [blame] | 5753 | if (sp->role.invalid && sp->root_count) |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5754 | continue; |
| 5755 | |
Sean Christopherson | 4506ecf | 2019-09-12 19:46:08 -0700 | [diff] [blame] | 5756 | /* |
| 5757 | * No need to flush the TLB since we're only zapping shadow |
 | 5758 | 		 * pages with an obsolete generation number and all vCPUs have
| 5759 | * loaded a new root, i.e. the shadow pages being zapped cannot |
| 5760 | * be in active use by the guest. |
| 5761 | */ |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5762 | if (batch >= BATCH_ZAP_PAGES && |
Sean Christopherson | 4506ecf | 2019-09-12 19:46:08 -0700 | [diff] [blame] | 5763 | cond_resched_lock(&kvm->mmu_lock)) { |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5764 | batch = 0; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5765 | goto restart; |
| 5766 | } |
| 5767 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5768 | if (__kvm_mmu_prepare_zap_page(kvm, sp, |
| 5769 | &kvm->arch.zapped_obsolete_pages, &nr_zapped)) { |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5770 | batch += nr_zapped; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5771 | goto restart; |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5772 | } |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5773 | } |
| 5774 | |
Sean Christopherson | 4506ecf | 2019-09-12 19:46:08 -0700 | [diff] [blame] | 5775 | /* |
| 5776 | * Trigger a remote TLB flush before freeing the page tables to ensure |
| 5777 | * KVM is not in the middle of a lockless shadow page table walk, which |
| 5778 | * may reference the pages. |
| 5779 | */ |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5780 | kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5781 | } |
| 5782 | |
| 5783 | /* |
 | 5784 |  * Fast-invalidate all shadow pages, using the lock-break technique
 | 5785 |  * to zap obsolete pages.
 | 5786 |  *
 | 5787 |  * This is required when a memslot is being deleted or the VM is being
 | 5788 |  * destroyed; in those cases we must ensure that, after this function
 | 5789 |  * returns, the KVM MMU no longer uses any resource of the slot being
 | 5790 |  * deleted (or, on VM destruction, of any slot at all).
| 5791 | */ |
| 5792 | static void kvm_mmu_zap_all_fast(struct kvm *kvm) |
| 5793 | { |
Sean Christopherson | ca333ad | 2019-09-12 19:46:11 -0700 | [diff] [blame] | 5794 | lockdep_assert_held(&kvm->slots_lock); |
| 5795 | |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5796 | spin_lock(&kvm->mmu_lock); |
Sean Christopherson | 14a3c4f | 2019-09-12 19:46:06 -0700 | [diff] [blame] | 5797 | trace_kvm_mmu_zap_all_fast(kvm); |
Sean Christopherson | ca333ad | 2019-09-12 19:46:11 -0700 | [diff] [blame] | 5798 | |
| 5799 | /* |
| 5800 | * Toggle mmu_valid_gen between '0' and '1'. Because slots_lock is |
| 5801 | * held for the entire duration of zapping obsolete pages, it's |
| 5802 | * impossible for there to be multiple invalid generations associated |
| 5803 | * with *valid* shadow pages at any given time, i.e. there is exactly |
| 5804 | * one valid generation and (at most) one invalid generation. |
| 5805 | */ |
| 5806 | kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5807 | |
Sean Christopherson | 4506ecf | 2019-09-12 19:46:08 -0700 | [diff] [blame] | 5808 | /* |
 | 5809 | 	 * Notify all vcpus to reload their shadow page tables and flush their TLBs.
 | 5810 | 	 * All vcpus will then switch to new shadow page tables with the new
 | 5811 | 	 * mmu_valid_gen.
| 5812 | * |
| 5813 | * Note: we need to do this under the protection of mmu_lock, |
 | 5814 | 	 * otherwise a vcpu could purge a shadow page but miss the tlb flush.
| 5815 | */ |
| 5816 | kvm_reload_remote_mmus(kvm); |
| 5817 | |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5818 | kvm_zap_obsolete_pages(kvm); |
| 5819 | spin_unlock(&kvm->mmu_lock); |
| 5820 | } |
| 5821 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5822 | static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) |
| 5823 | { |
| 5824 | return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); |
| 5825 | } |
| 5826 | |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5827 | static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, |
| 5828 | struct kvm_memory_slot *slot, |
| 5829 | struct kvm_page_track_notifier_node *node) |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5830 | { |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5831 | kvm_mmu_zap_all_fast(kvm); |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5832 | } |
| 5833 | |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5834 | void kvm_mmu_init_vm(struct kvm *kvm) |
| 5835 | { |
| 5836 | struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; |
| 5837 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5838 | node->track_write = kvm_mmu_pte_write; |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5839 | node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; |
| 5840 | kvm_page_track_register_notifier(kvm, node); |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5841 | } |
| 5842 | |
| 5843 | void kvm_mmu_uninit_vm(struct kvm *kvm) |
| 5844 | { |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5845 | struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5846 | |
| 5847 | kvm_page_track_unregister_notifier(kvm, node); |
| 5848 | } |
| 5849 | |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5850 | void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5851 | { |
| 5852 | struct kvm_memslots *slots; |
| 5853 | struct kvm_memory_slot *memslot; |
| 5854 | int i; |
| 5855 | |
| 5856 | spin_lock(&kvm->mmu_lock); |
| 5857 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
| 5858 | slots = __kvm_memslots(kvm, i); |
| 5859 | kvm_for_each_memslot(memslot, slots) { |
| 5860 | gfn_t start, end; |
| 5861 | |
| 5862 | start = max(gfn_start, memslot->base_gfn); |
| 5863 | end = min(gfn_end, memslot->base_gfn + memslot->npages); |
| 5864 | if (start >= end) |
| 5865 | continue; |
| 5866 | |
Ben Gardon | 92da008 | 2019-03-12 11:45:58 -0700 | [diff] [blame] | 5867 | slot_handle_level_range(kvm, memslot, kvm_zap_rmapp, |
| 5868 | PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL, |
| 5869 | start, end - 1, true); |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5870 | } |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5871 | } |
| 5872 | |
| 5873 | spin_unlock(&kvm->mmu_lock); |
| 5874 | } |
| 5875 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5876 | static bool slot_rmap_write_protect(struct kvm *kvm, |
| 5877 | struct kvm_rmap_head *rmap_head) |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5878 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5879 | return __rmap_write_protect(kvm, rmap_head, false); |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5880 | } |
| 5881 | |
Dor Laor | e0fa826 | 2007-03-30 13:06:33 +0300 | [diff] [blame] | 5882 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 5883 | struct kvm_memory_slot *memslot, |
| 5884 | int start_level) |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 5885 | { |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5886 | bool flush; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 5887 | |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5888 | spin_lock(&kvm->mmu_lock); |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 5889 | flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect, |
| 5890 | start_level, PT_MAX_HUGEPAGE_LEVEL, false); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5891 | spin_unlock(&kvm->mmu_lock); |
| 5892 | |
| 5893 | /* |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5894 | 	 * We can flush all the TLBs outside of the mmu lock without TLB
 | 5895 | 	 * corruption, since we only change the spte from writable to
Xiao Guangrong | e7d11c7 | 2013-05-31 08:36:27 +0800 | [diff] [blame] | 5896 | 	 * read-only, so we only need to care about the case of changing an
 | 5897 | 	 * spte from present to present (changing an spte from present
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5898 | 	 * to nonpresent flushes all the TLBs immediately). In other
 | 5899 | 	 * words, the only case we care about is mmu_spte_update(), where we
Wei Yang | bdd303c | 2018-11-05 14:45:03 +0800 | [diff] [blame] | 5900 | 	 * check SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5901 | 	 * instead of PT_WRITABLE_MASK, which means it no longer depends
 | 5902 | 	 * on PT_WRITABLE_MASK.
| 5903 | */ |
| 5904 | if (flush) |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 5905 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5906 | } |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5907 | |
| 5908 | static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5909 | struct kvm_rmap_head *rmap_head) |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5910 | { |
| 5911 | u64 *sptep; |
| 5912 | struct rmap_iterator iter; |
| 5913 | int need_tlb_flush = 0; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 5914 | kvm_pfn_t pfn; |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5915 | struct kvm_mmu_page *sp; |
| 5916 | |
| 5917 | restart: |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5918 | for_each_rmap_spte(rmap_head, &iter, sptep) { |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5919 | sp = page_header(__pa(sptep)); |
| 5920 | pfn = spte_to_pfn(*sptep); |
| 5921 | |
| 5922 | /* |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5923 | * We cannot do huge page mapping for indirect shadow pages, |
| 5924 | * which are found on the last rmap (level = 1) when not using |
| 5925 | * tdp; such shadow pages are synced with the page table in |
 | 5926 | 		 * the guest, and the guest page table uses a 4K page size
 | 5927 | 		 * mapping if the indirect sp has level = 1.
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5928 | */ |
Sean Christopherson | a78986a | 2019-11-11 14:12:27 -0800 | [diff] [blame] | 5929 | if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && |
Sean Christopherson | e851265 | 2020-01-08 12:24:48 -0800 | [diff] [blame] | 5930 | (kvm_is_zone_device_pfn(pfn) || |
| 5931 | PageCompound(pfn_to_page(pfn)))) { |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 5932 | pte_list_remove(rmap_head, sptep); |
Lan Tianyu | 40ef75a | 2018-12-06 21:21:08 +0800 | [diff] [blame] | 5933 | |
| 5934 | if (kvm_available_flush_tlb_with_range()) |
| 5935 | kvm_flush_remote_tlbs_with_address(kvm, sp->gfn, |
| 5936 | KVM_PAGES_PER_HPAGE(sp->role.level)); |
| 5937 | else |
| 5938 | need_tlb_flush = 1; |
| 5939 | |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 5940 | goto restart; |
| 5941 | } |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5942 | } |
| 5943 | |
| 5944 | return need_tlb_flush; |
| 5945 | } |
| 5946 | |
| 5947 | void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, |
Paolo Bonzini | f36f3f2 | 2015-05-18 13:20:23 +0200 | [diff] [blame] | 5948 | const struct kvm_memory_slot *memslot) |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5949 | { |
Paolo Bonzini | f36f3f2 | 2015-05-18 13:20:23 +0200 | [diff] [blame] | 5950 | /* FIXME: const-ify all uses of struct kvm_memory_slot. */ |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5951 | spin_lock(&kvm->mmu_lock); |
Paolo Bonzini | f36f3f2 | 2015-05-18 13:20:23 +0200 | [diff] [blame] | 5952 | slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot, |
| 5953 | kvm_mmu_zap_collapsible_spte, true); |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5954 | spin_unlock(&kvm->mmu_lock); |
| 5955 | } |
| 5956 | |
Sean Christopherson | b3594ff | 2020-02-18 13:07:34 -0800 | [diff] [blame] | 5957 | void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, |
| 5958 | struct kvm_memory_slot *memslot) |
| 5959 | { |
| 5960 | /* |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 5961 | * All current use cases for flushing the TLBs for a specific memslot |
| 5962 | * are related to dirty logging, and do the TLB flush out of mmu_lock. |
 | 5963 | 	 * The interactions between the various operations on the memslot must
 | 5964 | 	 * be serialized by slots_lock to ensure the TLB flush from one operation
| 5965 | * is observed by any other operation on the same memslot. |
Sean Christopherson | b3594ff | 2020-02-18 13:07:34 -0800 | [diff] [blame] | 5966 | */ |
| 5967 | lockdep_assert_held(&kvm->slots_lock); |
Sean Christopherson | cec3764 | 2020-02-18 13:07:35 -0800 | [diff] [blame] | 5968 | kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, |
| 5969 | memslot->npages); |
Sean Christopherson | b3594ff | 2020-02-18 13:07:34 -0800 | [diff] [blame] | 5970 | } |
| 5971 | |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5972 | void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, |
| 5973 | struct kvm_memory_slot *memslot) |
| 5974 | { |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5975 | bool flush; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5976 | |
| 5977 | spin_lock(&kvm->mmu_lock); |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5978 | flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5979 | spin_unlock(&kvm->mmu_lock); |
| 5980 | |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5981 | /* |
| 5982 | * It's also safe to flush TLBs out of mmu lock here as currently this |
 | 5983 | 	 * function is only used for dirty logging, in which case flushing the
 | 5984 | 	 * TLB out of mmu lock also guarantees no dirty pages will be lost in
| 5985 | * dirty_bitmap. |
| 5986 | */ |
| 5987 | if (flush) |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 5988 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5989 | } |
| 5990 | EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty); |
| 5991 | |
| 5992 | void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, |
| 5993 | struct kvm_memory_slot *memslot) |
| 5994 | { |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5995 | bool flush; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 5996 | |
| 5997 | spin_lock(&kvm->mmu_lock); |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5998 | flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect, |
| 5999 | false); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6000 | spin_unlock(&kvm->mmu_lock); |
| 6001 | |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6002 | if (flush) |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 6003 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6004 | } |
| 6005 | EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access); |
| 6006 | |
| 6007 | void kvm_mmu_slot_set_dirty(struct kvm *kvm, |
| 6008 | struct kvm_memory_slot *memslot) |
| 6009 | { |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 6010 | bool flush; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6011 | |
| 6012 | spin_lock(&kvm->mmu_lock); |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 6013 | flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6014 | spin_unlock(&kvm->mmu_lock); |
| 6015 | |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6016 | if (flush) |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 6017 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6018 | } |
| 6019 | EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty); |
| 6020 | |
Sean Christopherson | 92f58b5 | 2019-09-12 19:46:04 -0700 | [diff] [blame] | 6021 | void kvm_mmu_zap_all(struct kvm *kvm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 6022 | { |
| 6023 | struct kvm_mmu_page *sp, *node; |
Sean Christopherson | 7390de1 | 2019-02-05 13:01:31 -0800 | [diff] [blame] | 6024 | LIST_HEAD(invalid_list); |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 6025 | int ign; |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 6026 | |
Sean Christopherson | 7390de1 | 2019-02-05 13:01:31 -0800 | [diff] [blame] | 6027 | spin_lock(&kvm->mmu_lock); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 6028 | restart: |
Sean Christopherson | 8a674ad | 2019-02-05 13:01:32 -0800 | [diff] [blame] | 6029 | list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { |
| 6030 | if (sp->role.invalid && sp->root_count) |
| 6031 | continue; |
Sean Christopherson | 92f58b5 | 2019-09-12 19:46:04 -0700 | [diff] [blame] | 6032 | if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 6033 | goto restart; |
Sean Christopherson | 24efe61 | 2019-02-05 13:01:36 -0800 | [diff] [blame] | 6034 | if (cond_resched_lock(&kvm->mmu_lock)) |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 6035 | goto restart; |
| 6036 | } |
| 6037 | |
Sean Christopherson | 4771450 | 2019-02-05 13:01:23 -0800 | [diff] [blame] | 6038 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 6039 | spin_unlock(&kvm->mmu_lock); |
| 6040 | } |
| 6041 | |
Sean Christopherson | 1524825 | 2019-02-05 12:54:17 -0800 | [diff] [blame] | 6042 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 6043 | { |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 6044 | WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 6045 | |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 6046 | gen &= MMIO_SPTE_GEN_MASK; |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 6047 | |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 6048 | /* |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 6049 | * Generation numbers are incremented in multiples of the number of |
| 6050 | * address spaces in order to provide unique generations across all |
| 6051 | * address spaces. Strip what is effectively the address space |
| 6052 | * modifier prior to checking for a wrap of the MMIO generation so |
| 6053 | * that a wrap in any address space is detected. |
| 6054 | */ |
| 6055 | gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1); |
| 6056 | |
| 6057 | /* |
| 6058 | * The very rare case: if the MMIO generation number has wrapped, |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 6059 | * zap all shadow pages. |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 6060 | */ |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 6061 | if (unlikely(gen == 0)) { |
Bandan Das | ae0f549 | 2016-11-15 01:36:18 -0500 | [diff] [blame] | 6062 | kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n"); |
Sean Christopherson | 92f58b5 | 2019-09-12 19:46:04 -0700 | [diff] [blame] | 6063 | kvm_mmu_zap_all_fast(kvm); |
Takuya Yoshikawa | 7a2e8aa | 2013-06-21 01:34:31 +0900 | [diff] [blame] | 6064 | } |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 6065 | } |
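As a rough illustration of the generation stripping above (the concrete values are invented, and KVM_ADDRESS_SPACE_NUM == 2 is assumed): memslot generations that differ only in the address-space modifier collapse to the same MMIO generation, so a wrap is caught regardless of which address space triggered it.

/* Illustration only, not code from this file: */
u64 smm_gen  = 0x1a3 & MMIO_SPTE_GEN_MASK & ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
u64 norm_gen = 0x1a2 & MMIO_SPTE_GEN_MASK & ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
/* smm_gen == norm_gen: both address spaces share one MMIO generation. */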
| 6066 | |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6067 | static unsigned long |
| 6068 | mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6069 | { |
| 6070 | struct kvm *kvm; |
Ying Han | 1495f23 | 2011-05-24 17:12:27 -0700 | [diff] [blame] | 6071 | int nr_to_scan = sc->nr_to_scan; |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6072 | unsigned long freed = 0; |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6073 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 6074 | mutex_lock(&kvm_lock); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6075 | |
| 6076 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Jan Kiszka | 3d56cbd | 2011-12-02 18:35:24 +0100 | [diff] [blame] | 6077 | int idx; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 6078 | LIST_HEAD(invalid_list); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6079 | |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6080 | /* |
Takuya Yoshikawa | 35f2d16 | 2012-08-20 18:35:39 +0900 | [diff] [blame] | 6081 | * Never scan more than sc->nr_to_scan VM instances. |
 | 6082 | 		 * In practice this condition is never hit, since we do not try
| 6083 | * to shrink more than one VM and it is very unlikely to see |
| 6084 | * !n_used_mmu_pages so many times. |
| 6085 | */ |
| 6086 | if (!nr_to_scan--) |
| 6087 | break; |
| 6088 | /* |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6089 | * n_used_mmu_pages is accessed without holding kvm->mmu_lock |
 | 6090 | 		 * here. We may skip a VM instance erroneously, but we do not
| 6091 | * want to shrink a VM that only started to populate its MMU |
| 6092 | * anyway. |
| 6093 | */ |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 6094 | if (!kvm->arch.n_used_mmu_pages && |
| 6095 | !kvm_has_zapped_obsolete_pages(kvm)) |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6096 | continue; |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6097 | |
Marcelo Tosatti | f656ce0 | 2009-12-23 14:35:25 -0200 | [diff] [blame] | 6098 | idx = srcu_read_lock(&kvm->srcu); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6099 | spin_lock(&kvm->mmu_lock); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6100 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 6101 | if (kvm_has_zapped_obsolete_pages(kvm)) { |
| 6102 | kvm_mmu_commit_zap_page(kvm, |
| 6103 | &kvm->arch.zapped_obsolete_pages); |
| 6104 | goto unlock; |
| 6105 | } |
| 6106 | |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6107 | if (prepare_zap_oldest_mmu_page(kvm, &invalid_list)) |
| 6108 | freed++; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 6109 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6110 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 6111 | unlock: |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6112 | spin_unlock(&kvm->mmu_lock); |
Marcelo Tosatti | f656ce0 | 2009-12-23 14:35:25 -0200 | [diff] [blame] | 6113 | srcu_read_unlock(&kvm->srcu, idx); |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6114 | |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6115 | /* |
| 6116 | * unfair on small ones |
| 6117 | * per-vm shrinkers cry out |
| 6118 | * sadness comes quickly |
| 6119 | */ |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6120 | list_move_tail(&kvm->vm_list, &vm_list); |
| 6121 | break; |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6122 | } |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6123 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 6124 | mutex_unlock(&kvm_lock); |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6125 | return freed; |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6126 | } |
| 6127 | |
| 6128 | static unsigned long |
| 6129 | mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) |
| 6130 | { |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 6131 | return percpu_counter_read_positive(&kvm_total_used_mmu_pages); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6132 | } |
| 6133 | |
| 6134 | static struct shrinker mmu_shrinker = { |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6135 | .count_objects = mmu_shrink_count, |
| 6136 | .scan_objects = mmu_shrink_scan, |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6137 | .seeks = DEFAULT_SEEKS * 10, |
| 6138 | }; |
| 6139 | |
Ingo Molnar | 2ddfd20 | 2008-05-22 10:37:48 +0200 | [diff] [blame] | 6140 | static void mmu_destroy_caches(void) |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6141 | { |
Tim Hansen | c1bd743 | 2017-10-07 23:15:23 -0400 | [diff] [blame] | 6142 | kmem_cache_destroy(pte_list_desc_cache); |
| 6143 | kmem_cache_destroy(mmu_page_header_cache); |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6144 | } |
| 6145 | |
Kai Huang | 7b6f8a0 | 2019-05-03 03:08:52 -0700 | [diff] [blame] | 6146 | static void kvm_set_mmio_spte_mask(void) |
| 6147 | { |
| 6148 | u64 mask; |
Kai Huang | 7b6f8a0 | 2019-05-03 03:08:52 -0700 | [diff] [blame] | 6149 | |
| 6150 | /* |
 | 6151 | 	 * Set the reserved bits and the present bit of a paging-structure
 | 6152 | 	 * entry to generate a page fault with PFERR.RSVD = 1.
| 6153 | */ |
| 6154 | |
| 6155 | /* |
| 6156 | * Mask the uppermost physical address bit, which would be reserved as |
| 6157 | * long as the supported physical address width is less than 52. |
| 6158 | */ |
| 6159 | mask = 1ull << 51; |
| 6160 | |
| 6161 | /* Set the present bit. */ |
| 6162 | mask |= 1ull; |
| 6163 | |
| 6164 | /* |
 | 6165 | 	 * If the reserved bit is not supported, clear the present bit to
 | 6166 | 	 * disable the MMIO page fault.
| 6167 | */ |
Sean Christopherson | e30a7d6 | 2020-01-07 16:12:10 -0800 | [diff] [blame] | 6168 | if (shadow_phys_bits == 52) |
Kai Huang | 7b6f8a0 | 2019-05-03 03:08:52 -0700 | [diff] [blame] | 6169 | mask &= ~1ull; |
| 6170 | |
Sean Christopherson | 4af7715 | 2019-08-01 13:35:22 -0700 | [diff] [blame] | 6171 | kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK); |
Kai Huang | 7b6f8a0 | 2019-05-03 03:08:52 -0700 | [diff] [blame] | 6172 | } |
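On most hosts the computation above reduces to a single constant; the snippet below is a worked example under a stated assumption, not code from this file.

/* Worked example, assuming shadow_phys_bits < 52: */
u64 example_mmio_mask = (1ull << 51) | 1ull;	/* 0x8000000000000001 */
/*
 * Bit 51 is reserved in the SPTE on such hosts, so an access through an
 * MMIO SPTE faults with the reserved-bit error code set; with exactly 52
 * supported physical address bits the present bit is cleared instead and
 * MMIO page-fault caching is effectively disabled.
 */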
| 6173 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 6174 | static bool get_nx_auto_mode(void) |
| 6175 | { |
 | 6176 | 	/* Return true when the CPU has the bug and mitigations are ON */
| 6177 | return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off(); |
| 6178 | } |
| 6179 | |
| 6180 | static void __set_nx_huge_pages(bool val) |
| 6181 | { |
| 6182 | nx_huge_pages = itlb_multihit_kvm_mitigation = val; |
| 6183 | } |
| 6184 | |
| 6185 | static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) |
| 6186 | { |
| 6187 | bool old_val = nx_huge_pages; |
| 6188 | bool new_val; |
| 6189 | |
| 6190 | /* In "auto" mode deploy workaround only if CPU has the bug. */ |
| 6191 | if (sysfs_streq(val, "off")) |
| 6192 | new_val = 0; |
| 6193 | else if (sysfs_streq(val, "force")) |
| 6194 | new_val = 1; |
| 6195 | else if (sysfs_streq(val, "auto")) |
| 6196 | new_val = get_nx_auto_mode(); |
| 6197 | else if (strtobool(val, &new_val) < 0) |
| 6198 | return -EINVAL; |
| 6199 | |
| 6200 | __set_nx_huge_pages(new_val); |
| 6201 | |
| 6202 | if (new_val != old_val) { |
| 6203 | struct kvm *kvm; |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 6204 | |
| 6205 | mutex_lock(&kvm_lock); |
| 6206 | |
| 6207 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Sean Christopherson | ed69a6c | 2019-11-13 11:30:32 -0800 | [diff] [blame] | 6208 | mutex_lock(&kvm->slots_lock); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 6209 | kvm_mmu_zap_all_fast(kvm); |
Sean Christopherson | ed69a6c | 2019-11-13 11:30:32 -0800 | [diff] [blame] | 6210 | mutex_unlock(&kvm->slots_lock); |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6211 | |
| 6212 | wake_up_process(kvm->arch.nx_lpage_recovery_thread); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 6213 | } |
| 6214 | mutex_unlock(&kvm_lock); |
| 6215 | } |
| 6216 | |
| 6217 | return 0; |
| 6218 | } |
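set_nx_huge_pages() is the .set handler of a module parameter. The sketch below shows how such a handler is typically registered; the ops-struct name and permission bits are assumptions, and the file's actual registration (earlier in the source, not shown in this excerpt) may differ.

static const struct kernel_param_ops nx_huge_pages_ops = {
	.set = set_nx_huge_pages,
	.get = param_get_bool,
};
/* Would expose /sys/module/kvm/parameters/nx_huge_pages, accepting "off"/"force"/"auto". */
module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);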
| 6219 | |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6220 | int kvm_mmu_module_init(void) |
| 6221 | { |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6222 | int ret = -ENOMEM; |
| 6223 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 6224 | if (nx_huge_pages == -1) |
| 6225 | __set_nx_huge_pages(get_nx_auto_mode()); |
| 6226 | |
Vitaly Kuznetsov | 36d9594d | 2018-10-08 21:28:10 +0200 | [diff] [blame] | 6227 | /* |
 | 6228 | 	 * MMU roles use union aliasing which is, generally speaking,
 | 6229 | 	 * undefined behavior. However, we supposedly know how compilers behave
 | 6230 | 	 * and the current status quo is unlikely to change. The guards below are
 | 6231 | 	 * there to let us know if the assumption becomes false.
| 6232 | */ |
| 6233 | BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32)); |
| 6234 | BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32)); |
| 6235 | BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64)); |
| 6236 | |
Junaid Shahid | 28a1f3a | 2018-08-14 10:15:34 -0700 | [diff] [blame] | 6237 | kvm_mmu_reset_all_pte_masks(); |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 6238 | |
Kai Huang | 7b6f8a0 | 2019-05-03 03:08:52 -0700 | [diff] [blame] | 6239 | kvm_set_mmio_spte_mask(); |
| 6240 | |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 6241 | pte_list_desc_cache = kmem_cache_create("pte_list_desc", |
| 6242 | sizeof(struct pte_list_desc), |
Shakeel Butt | 46bea48 | 2017-10-05 18:07:24 -0700 | [diff] [blame] | 6243 | 0, SLAB_ACCOUNT, NULL); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 6244 | if (!pte_list_desc_cache) |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6245 | goto out; |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6246 | |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 6247 | mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", |
| 6248 | sizeof(struct kvm_mmu_page), |
Shakeel Butt | 46bea48 | 2017-10-05 18:07:24 -0700 | [diff] [blame] | 6249 | 0, SLAB_ACCOUNT, NULL); |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 6250 | if (!mmu_page_header_cache) |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6251 | goto out; |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 6252 | |
Tejun Heo | 908c7f1 | 2014-09-08 09:51:29 +0900 | [diff] [blame] | 6253 | if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6254 | goto out; |
Wei Yongjun | 45bf21a | 2010-08-23 16:13:15 +0800 | [diff] [blame] | 6255 | |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6256 | ret = register_shrinker(&mmu_shrinker); |
| 6257 | if (ret) |
| 6258 | goto out; |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6259 | |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6260 | return 0; |
| 6261 | |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6262 | out: |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6263 | mmu_destroy_caches(); |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6264 | return ret; |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6265 | } |
| 6266 | |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6267 | /* |
Peng Hao | 39337ad | 2018-10-04 11:45:00 -0400 | [diff] [blame] | 6268 |  * Calculate the number of MMU pages needed for the VM.
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6269 | */ |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 6270 | unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6271 | { |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 6272 | unsigned long nr_mmu_pages; |
| 6273 | unsigned long nr_pages = 0; |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 6274 | struct kvm_memslots *slots; |
Xiao Guangrong | be6ba0f | 2011-11-24 17:39:18 +0800 | [diff] [blame] | 6275 | struct kvm_memory_slot *memslot; |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 6276 | int i; |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6277 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 6278 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
| 6279 | slots = __kvm_memslots(kvm, i); |
Lai Jiangshan | 90d83dc | 2010-04-19 17:41:23 +0800 | [diff] [blame] | 6280 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 6281 | kvm_for_each_memslot(memslot, slots) |
| 6282 | nr_pages += memslot->npages; |
| 6283 | } |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6284 | |
| 6285 | nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 6286 | nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6287 | |
| 6288 | return nr_mmu_pages; |
| 6289 | } |
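A quick worked example of the per-mille budget, assuming KVM_PERMILLE_MMU_PAGES is 20 and KVM_MIN_ALLOC_MMU_PAGES is 64 (values on other trees may differ):

/*
 * Worked example (assumed: 20 permille, 64-page floor):
 *   4 GiB guest  -> 1048576 4KiB pages -> 1048576 * 20 / 1000 = 20971 MMU pages
 *   64 MiB guest ->   16384 4KiB pages ->   16384 * 20 / 1000 =   327 MMU pages
 *   1 MiB guest  ->     256 4KiB pages ->     5, raised to the 64-page floor
 */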
| 6290 | |
Xiao Guangrong | c42fffe | 2010-09-27 18:07:07 +0800 | [diff] [blame] | 6291 | void kvm_mmu_destroy(struct kvm_vcpu *vcpu) |
| 6292 | { |
Paolo Bonzini | 95f93af | 2013-10-02 16:56:12 +0200 | [diff] [blame] | 6293 | kvm_mmu_unload(vcpu); |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 6294 | free_mmu_pages(&vcpu->arch.root_mmu); |
| 6295 | free_mmu_pages(&vcpu->arch.guest_mmu); |
Xiao Guangrong | c42fffe | 2010-09-27 18:07:07 +0800 | [diff] [blame] | 6296 | mmu_free_memory_caches(vcpu); |
Xiao Guangrong | b034cf0 | 2010-12-23 16:08:35 +0800 | [diff] [blame] | 6297 | } |
| 6298 | |
Xiao Guangrong | b034cf0 | 2010-12-23 16:08:35 +0800 | [diff] [blame] | 6299 | void kvm_mmu_module_exit(void) |
| 6300 | { |
| 6301 | mmu_destroy_caches(); |
| 6302 | percpu_counter_destroy(&kvm_total_used_mmu_pages); |
| 6303 | unregister_shrinker(&mmu_shrinker); |
Xiao Guangrong | c42fffe | 2010-09-27 18:07:07 +0800 | [diff] [blame] | 6304 | mmu_audit_disable(); |
| 6305 | } |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6306 | |
| 6307 | static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp) |
| 6308 | { |
| 6309 | unsigned int old_val; |
| 6310 | int err; |
| 6311 | |
| 6312 | old_val = nx_huge_pages_recovery_ratio; |
| 6313 | err = param_set_uint(val, kp); |
| 6314 | if (err) |
| 6315 | return err; |
| 6316 | |
| 6317 | if (READ_ONCE(nx_huge_pages) && |
| 6318 | !old_val && nx_huge_pages_recovery_ratio) { |
| 6319 | struct kvm *kvm; |
| 6320 | |
| 6321 | mutex_lock(&kvm_lock); |
| 6322 | |
| 6323 | list_for_each_entry(kvm, &vm_list, vm_list) |
| 6324 | wake_up_process(kvm->arch.nx_lpage_recovery_thread); |
| 6325 | |
| 6326 | mutex_unlock(&kvm_lock); |
| 6327 | } |
| 6328 | |
| 6329 | return err; |
| 6330 | } |
| 6331 | |
| 6332 | static void kvm_recover_nx_lpages(struct kvm *kvm) |
| 6333 | { |
| 6334 | int rcu_idx; |
| 6335 | struct kvm_mmu_page *sp; |
| 6336 | unsigned int ratio; |
| 6337 | LIST_HEAD(invalid_list); |
| 6338 | ulong to_zap; |
| 6339 | |
| 6340 | rcu_idx = srcu_read_lock(&kvm->srcu); |
| 6341 | spin_lock(&kvm->mmu_lock); |
| 6342 | |
| 6343 | ratio = READ_ONCE(nx_huge_pages_recovery_ratio); |
| 6344 | to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0; |
| 6345 | while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) { |
| 6346 | /* |
| 6347 | * We use a separate list instead of just using active_mmu_pages |
| 6348 | * because the number of lpage_disallowed pages is expected to |
| 6349 | * be relatively small compared to the total. |
| 6350 | */ |
| 6351 | sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages, |
| 6352 | struct kvm_mmu_page, |
| 6353 | lpage_disallowed_link); |
| 6354 | WARN_ON_ONCE(!sp->lpage_disallowed); |
| 6355 | kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); |
| 6356 | WARN_ON_ONCE(sp->lpage_disallowed); |
| 6357 | |
| 6358 | if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) { |
| 6359 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
| 6360 | if (to_zap) |
| 6361 | cond_resched_lock(&kvm->mmu_lock); |
| 6362 | } |
| 6363 | } |
| 6364 | |
| 6365 | spin_unlock(&kvm->mmu_lock); |
| 6366 | srcu_read_unlock(&kvm->srcu, rcu_idx); |
| 6367 | } |
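To give a sense of scale for the ratio computation above (the default of nx_huge_pages_recovery_ratio is assumed to be 60 here): each pass reclaims roughly 1/ratio of the currently NX-split huge pages.

/*
 * Worked example, assuming nx_huge_pages_recovery_ratio == 60:
 *   600 lpage_disallowed pages -> DIV_ROUND_UP(600, 60) = 10 zapped this pass
 *    30 lpage_disallowed pages -> DIV_ROUND_UP(30, 60)  =  1 zapped this pass
 *   ratio == 0                 -> to_zap == 0, recovery effectively disabled
 */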
| 6368 | |
| 6369 | static long get_nx_lpage_recovery_timeout(u64 start_time) |
| 6370 | { |
| 6371 | return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio) |
| 6372 | ? start_time + 60 * HZ - get_jiffies_64() |
| 6373 | : MAX_SCHEDULE_TIMEOUT; |
| 6374 | } |
| 6375 | |
| 6376 | static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data) |
| 6377 | { |
| 6378 | u64 start_time; |
| 6379 | long remaining_time; |
| 6380 | |
| 6381 | while (true) { |
| 6382 | start_time = get_jiffies_64(); |
| 6383 | remaining_time = get_nx_lpage_recovery_timeout(start_time); |
| 6384 | |
| 6385 | set_current_state(TASK_INTERRUPTIBLE); |
| 6386 | while (!kthread_should_stop() && remaining_time > 0) { |
| 6387 | schedule_timeout(remaining_time); |
| 6388 | remaining_time = get_nx_lpage_recovery_timeout(start_time); |
| 6389 | set_current_state(TASK_INTERRUPTIBLE); |
| 6390 | } |
| 6391 | |
| 6392 | set_current_state(TASK_RUNNING); |
| 6393 | |
| 6394 | if (kthread_should_stop()) |
| 6395 | return 0; |
| 6396 | |
| 6397 | kvm_recover_nx_lpages(kvm); |
| 6398 | } |
| 6399 | } |
| 6400 | |
| 6401 | int kvm_mmu_post_init_vm(struct kvm *kvm) |
| 6402 | { |
| 6403 | int err; |
| 6404 | |
| 6405 | err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0, |
| 6406 | "kvm-nx-lpage-recovery", |
| 6407 | &kvm->arch.nx_lpage_recovery_thread); |
| 6408 | if (!err) |
| 6409 | kthread_unpark(kvm->arch.nx_lpage_recovery_thread); |
| 6410 | |
| 6411 | return err; |
| 6412 | } |
| 6413 | |
| 6414 | void kvm_mmu_pre_destroy_vm(struct kvm *kvm) |
| 6415 | { |
| 6416 | if (kvm->arch.nx_lpage_recovery_thread) |
| 6417 | kthread_stop(kvm->arch.nx_lpage_recovery_thread); |
| 6418 | } |