// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
#include <linux/kthread.h>

#include <asm/page.h>
#include <asm/memtype.h>
#include <asm/cmpxchg.h>
#include <asm/e820/api.h>
#include <asm/io.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
#include "trace.h"

extern bool itlb_multihit_kvm_mitigation;

static int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops nx_huge_pages_ops = {
	.set = set_nx_huge_pages,
	.get = param_get_bool,
};

static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
	.set = set_nx_huge_pages_recovery_ratio,
	.get = param_get_uint,
};

module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
		&nx_huge_pages_recovery_ratio, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
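/*
 * Both parameters above are writable at runtime by root, e.g. via
 * /sys/module/kvm/parameters/nx_huge_pages and
 * /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio.
 */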

static bool __read_mostly force_flush_and_sync_on_reuse;
module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);

/*
 * Setting this variable to true enables Two-Dimensional Paging (TDP), where
 * the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports TDP, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

static int max_huge_page_level __read_mostly;
static int max_tdp_level __read_mostly;

enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
};

#undef MMU_DEBUG

#ifdef MMU_DEBUG
static bool dbg = 0;
module_param(dbg, bool, 0644);

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

#define PTE_PREFETCH_NUM		8

#define PT_FIRST_AVAIL_BITS_SHIFT 10
#define PT64_SECOND_AVAIL_BITS_SHIFT 54

/*
 * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
 * Access Tracking SPTEs.
 */
#define SPTE_SPECIAL_MASK (3ULL << 52)
#define SPTE_AD_ENABLED_MASK (0ULL << 52)
#define SPTE_AD_DISABLED_MASK (1ULL << 52)
#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
#define SPTE_MMIO_MASK (3ULL << 52)
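/*
 * SPTE_SPECIAL_MASK thus covers spte bits 52-53, which encode one of four
 * flavors: A/D bits enabled (0), A/D bits disabled (1), write-protect-only
 * dirty tracking (2) or MMIO (3).
 */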

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
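/*
 * For example, with 4KiB pages (PAGE_SHIFT == 12) PT64_LEVEL_SHIFT(1) is 12,
 * PT64_LEVEL_SHIFT(2) is 21 and PT64_LEVEL_SHIFT(3) is 30, so
 * PT64_INDEX(addr, 2) extracts bits 21-29 of the address as the index into a
 * 512-entry level-2 table.
 */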


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
#else
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
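/*
 * For example, PT64_LVL_ADDR_MASK(2) keeps address bits 21-51, i.e. the base
 * of the 2MiB region mapped by a level-2 entry, while PT64_LVL_OFFSET_MASK(2)
 * keeps bits 12-20, i.e. the 4KiB-page offset within that region.
 */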

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/* The mask for the R/X bits in EPT PTEs */
#define PT64_EPT_READABLE_MASK		0x1ull
#define PT64_EPT_EXECUTABLE_MASK	0x4ull

#include <trace/events/kvm.h>

#define SPTE_HOST_WRITEABLE	(1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SPTE_MMU_WRITEABLE	(1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3
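/*
 * With PTE_LIST_EXT == 3, a pte_list_desc is three spte pointers plus the
 * 'more' pointer, i.e. 32 bytes on x86-64, so it fits comfortably within a
 * typical 64-byte cache line.
 */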

/*
 * Return values of handle_mmio_page_fault, mmu.page_fault, and fast_page_fault().
 *
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

struct pte_list_desc {
	u64 *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	u64 *sptep;
	int level;
	unsigned index;
};

#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
					 (_root), (_addr));                \
	     shadow_walk_okay(&(_walker));			           \
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
	     shadow_walk_okay(&(_walker)) &&				\
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
	     __shadow_walk_next(&(_walker), spte))
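/*
 * Example (sketch): walking the shadow page table for an address while
 * holding mmu_lock, stopping at the first non-present entry:
 *
 *	struct kvm_shadow_walk_iterator iterator;
 *
 *	for_each_shadow_entry(vcpu, addr, iterator) {
 *		if (!is_shadow_present_pte(*iterator.sptep))
 *			break;
 *	}
 */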

static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_value;
static u64 __read_mostly shadow_mmio_access_mask;
static u64 __read_mostly shadow_present_mask;
static u64 __read_mostly shadow_me_mask;

/*
 * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK;
 * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
 * pages.
 */
static u64 __read_mostly shadow_acc_track_mask;

/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes. We do not save the W bit as the
 * PTEs being access tracked also need to be dirty tracked, so the W bit will be
 * restored only when a write is attempted to the page.
 */
static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
						    PT64_EPT_EXECUTABLE_MASK;
static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;

/*
 * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
 * to guard against L1TF attacks.
 */
static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

/*
 * The number of high-order 1 bits to use in the mask above.
 */
static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;

/*
 * In some cases, we need to preserve the GFN of a non-present or reserved
 * SPTE when we usurp the upper five bits of the physical address space to
 * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
 * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
 * left into the reserved bits, i.e. the GFN in the SPTE will be split into
 * high and low parts.  This mask covers the lower bits of the GFN.
 */
static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
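/*
 * For example, if boot_cpu_data.x86_cache_bits is 46 and the CPU is affected
 * by L1TF, the mask covers GPA bits 41-45; make_mmio_spte() stores a copy of
 * those bits shifted up into spte bits 46-50 (and forces bits 41-45 to 1),
 * and get_mmio_spte_gfn() shifts them back down when reconstructing the gfn.
 */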

/*
 * The number of non-reserved physical address bits irrespective of features
 * that repurpose legal bits, e.g. MKTME.
 */
static u8 __read_mostly shadow_phys_bits;

static void mmu_spte_set(u64 *sptep, u64 spte);
static bool is_executable_pte(u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);

#define CREATE_TRACE_POINTS
#include "mmutrace.h"


static inline bool kvm_available_flush_tlb_with_range(void)
{
	return kvm_x86_ops.tlb_remote_flush_with_range;
}

static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	int ret = -ENOTSUPP;

	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
		ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);

	if (ret)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
		u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range;

	range.start_gfn = start_gfn;
	range.pages = pages;

	kvm_flush_remote_tlbs_with_range(kvm, &range);
}
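/*
 * Example (sketch): to flush the mappings covered by a single 2MiB huge page
 * after zapping its sptes, a caller can do
 *
 *	kvm_flush_remote_tlbs_with_address(kvm, gfn,
 *					   KVM_PAGES_PER_HPAGE(PG_LEVEL_2M));
 *
 * where gfn is the first gfn of the range; this falls back to a full
 * kvm_flush_remote_tlbs() when the backend lacks ranged flushes.
 */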

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

static bool is_mmio_spte(u64 spte)
{
	return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
}

static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
	return sp->role.ad_disabled;
}

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the log
	 * would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages.  This also bypasses
	 * PML, since writes now result in a vmexit.
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
}

static inline bool spte_ad_enabled(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
}

static inline bool spte_ad_need_write_protect(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
}

static bool is_nx_huge_page_enabled(void)
{
	return READ_ONCE(nx_huge_pages);
}

static inline u64 spte_shadow_accessed_mask(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}

static inline u64 spte_shadow_dirty_mask(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}

static inline bool is_access_track_spte(u64 spte)
{
	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}

/*
 * Due to limited space in PTEs, the MMIO generation is an 18 bit subset of
 * the memslots generation and is derived as follows:
 *
 * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
 * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62
 *
 * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
 * the MMIO generation number, as doing so would require stealing a bit from
 * the "real" generation number and thus effectively halve the maximum number
 * of MMIO generations that can be handled before encountering a wrap (which
 * requires a full MMU zap).  The flag is instead explicitly queried when
 * checking for MMIO spte cache hits.
 */
#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(17, 0)

#define MMIO_SPTE_GEN_LOW_START		3
#define MMIO_SPTE_GEN_LOW_END		11
#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
						    MMIO_SPTE_GEN_LOW_START)

#define MMIO_SPTE_GEN_HIGH_START	PT64_SECOND_AVAIL_BITS_SHIFT
#define MMIO_SPTE_GEN_HIGH_END		62
#define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
						    MMIO_SPTE_GEN_HIGH_START)

/*
 * The low spte field holds generation bits 0-8; shift the generation right
 * by that many bits before placing bits 9-17 in the high field so that the
 * two halves don't overlap.
 */
#define MMIO_SPTE_GEN_LOW_BITS		(MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
#define MMIO_SPTE_GEN_HIGH_SHIFT	(MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)

static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
	BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
	/*
	 * Shift by MMIO_SPTE_GEN_HIGH_SHIFT, not MMIO_SPTE_GEN_HIGH_START, so
	 * that generation bits 9-17 (not a duplicate of bits 0-8) land in the
	 * high spte field.
	 */
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}

static u64 get_mmio_spte_generation(u64 spte)
{
	u64 gen;

	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
	return gen;
}

static u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 mask = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	access &= shadow_mmio_access_mask;
	mask |= shadow_mmio_value | access;
	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< shadow_nonpresent_or_rsvd_mask_len;

	return mask;
}

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
			   unsigned int access)
{
	u64 mask = make_mmio_spte(vcpu, gfn, access);
	unsigned int gen = get_mmio_spte_generation(mask);

	access = mask & ACC_ALL;

	trace_mark_mmio_spte(sptep, gfn, access, gen);
	mmu_spte_set(sptep, mask);
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
	       & shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
}

static unsigned get_mmio_spte_access(u64 spte)
{
	return spte & shadow_mmio_access_mask;
}

static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			  kvm_pfn_t pfn, unsigned int access)
{
	if (unlikely(is_noslot_pfn(pfn))) {
		mark_mmio_spte(vcpu, sptep, gfn, access);
		return true;
	}

	return false;
}

static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
	u64 kvm_gen, spte_gen, gen;

	gen = kvm_vcpu_memslots(vcpu)->generation;
	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return false;

	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
	spte_gen = get_mmio_spte_generation(spte);

	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
	return likely(kvm_gen == spte_gen);
}

static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception)
{
	/* Check that the guest physical address doesn't exceed the guest maximum */
	if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
		exception->error_code |= PFERR_RSVD_MASK;
		return UNMAPPED_GVA;
	}

	return gpa;
}

/*
 * Sets the shadow PTE masks used by the MMU.
 *
 * Assumptions:
 *  - Setting either @accessed_mask or @dirty_mask requires setting both
 *  - At least one of @accessed_mask or @acc_track_mask must be set
 */
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask)
{
	BUG_ON(!dirty_mask != !accessed_mask);
	BUG_ON(!accessed_mask && !acc_track_mask);
	BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);

	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
	shadow_present_mask = p_mask;
	shadow_acc_track_mask = acc_track_mask;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME is detected
	 * in CPU detection code, but the processor treats those reduced bits as
	 * 'keyID' bits, so they are not reserved bits.  Therefore KVM needs to
	 * look at the physical address bits reported by CPUID.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
	 * custom CPUID.  Proceed with whatever the kernel found since these features
	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}

static void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;

	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
	shadow_dirty_mask = 0;
	shadow_nx_mask = 0;
	shadow_x_mask = 0;
	shadow_present_mask = 0;
	shadow_acc_track_mask = 0;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or less physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID. Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - shadow_nonpresent_or_rsvd_mask_len)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- shadow_nonpresent_or_rsvd_mask_len;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
	return (pte != 0) && !is_mmio_spte(pte);
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_last_spte(u64 pte, int level)
{
	if (level == PG_LEVEL_4K)
		return 1;
	if (is_large_pte(pte))
		return 1;
	return 0;
}

static bool is_executable_pte(u64 spte)
{
	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
}

static kvm_pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}

static u64 __get_spte_lockless(u64 *sptep)
{
	return READ_ONCE(*sptep);
}
#else
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};

static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, we should store
	 * the high bits first and only then set the present bit, so the CPU
	 * cannot fetch this spte while we are still setting it.
	 */
	smp_wmb();

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * the present bit first to avoid the vCPU fetching the old high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);

	return orig.spte;
}

/*
 * The idea of using this lightweight way to get the spte on x86_32 comes
 * from gup_get_pte (mm/gup.c).
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running outside of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for a non-present spte),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
#endif

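/*
 * An spte can be made writable without holding mmu_lock (e.g. by the fast
 * page fault path) only if both the host (SPTE_HOST_WRITEABLE) and KVM's MMU
 * (SPTE_MMU_WRITEABLE) consider the page writable.
 */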
static bool spte_can_locklessly_be_made_writable(u64 spte)
{
	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
}

static bool spte_has_volatile_bits(u64 spte)
{
	if (!is_shadow_present_pte(spte))
		return false;

	/*
	 * Always update the spte atomically if it can be updated outside
	 * of the mmu-lock: this ensures the dirty bit is not lost and also
	 * gives us a stable is_writable_pte(), so that a needed TLB flush
	 * is not missed.
	 */
	if (spte_can_locklessly_be_made_writable(spte) ||
	    is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if ((spte & shadow_accessed_mask) == 0 ||
		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
			return true;
	}

	return false;
}

static bool is_accessed_spte(u64 spte)
{
	u64 accessed_mask = spte_shadow_accessed_mask(spte);

	return accessed_mask ? spte & accessed_mask
			     : !is_access_track_spte(spte);
}

static bool is_dirty_spte(u64 spte)
{
	u64 dirty_mask = spte_shadow_dirty_mask(spte);

	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
 */
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;

	WARN_ON(!is_shadow_present_pte(new_spte));

	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
		return old_spte;
	}

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, new_spte);
	else
		old_spte = __update_clear_spte_slow(sptep, new_spte);

	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

	return old_spte;
}

Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 884 | /* Rules for using mmu_spte_update: |
Andrea Gelmini | bb3541f | 2016-05-21 14:14:44 +0200 | [diff] [blame] | 885 | * Update the state bits; the mapped pfn is not changed.
Xiao Guangrong | 6e7d035 | 2012-06-20 15:58:33 +0800 | [diff] [blame] | 886 | *
| 887 | * Whenever we overwrite a writable spte with a read-only one we
| 888 | * should flush remote TLBs. Otherwise rmap_write_protect will
| 889 | * find a read-only spte even though the writable spte might
| 890 | * still be cached in a CPU's TLB; the return value indicates
| 891 | * this case.
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 892 | * |
| 893 | * Returns true if the TLB needs to be flushed |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 894 | */ |
Xiao Guangrong | 6e7d035 | 2012-06-20 15:58:33 +0800 | [diff] [blame] | 895 | static bool mmu_spte_update(u64 *sptep, u64 new_spte) |
Avi Kivity | b79b93f | 2010-06-06 15:46:44 +0300 | [diff] [blame] | 896 | { |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 897 | bool flush = false; |
Junaid Shahid | f39a058 | 2016-12-06 16:46:14 -0800 | [diff] [blame] | 898 | u64 old_spte = mmu_spte_update_no_track(sptep, new_spte); |
Avi Kivity | b79b93f | 2010-06-06 15:46:44 +0300 | [diff] [blame] | 899 | |
Junaid Shahid | f39a058 | 2016-12-06 16:46:14 -0800 | [diff] [blame] | 900 | if (!is_shadow_present_pte(old_spte)) |
| 901 | return false; |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 902 | |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 903 | /* |
| 904 | * Updating the spte out of mmu-lock is safe because
Adam Buchbinder | 6a6256f | 2016-02-23 15:34:30 -0800 | [diff] [blame] | 905 | * we always update it atomically; see the comments in
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 906 | * spte_has_volatile_bits(). |
| 907 | */ |
Junaid Shahid | ea4114b | 2016-12-06 16:46:11 -0800 | [diff] [blame] | 908 | if (spte_can_locklessly_be_made_writable(old_spte) && |
Xiao Guangrong | 7f31c95 | 2014-04-17 17:06:15 +0800 | [diff] [blame] | 909 | !is_writable_pte(new_spte)) |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 910 | flush = true; |
Xiao Guangrong | 4132779 | 2010-08-02 16:15:08 +0800 | [diff] [blame] | 911 | |
Kai Huang | 7e71a59 | 2015-01-09 16:44:30 +0800 | [diff] [blame] | 912 | /* |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 913 | * Flush the TLB when the accessed/dirty state changes in the page tables,
Kai Huang | 7e71a59 | 2015-01-09 16:44:30 +0800 | [diff] [blame] | 914 | * to guarantee consistency between the TLB and the page tables.
| 915 | */ |
Kai Huang | 7e71a59 | 2015-01-09 16:44:30 +0800 | [diff] [blame] | 916 | |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 917 | if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) { |
| 918 | flush = true; |
Xiao Guangrong | 4132779 | 2010-08-02 16:15:08 +0800 | [diff] [blame] | 919 | kvm_set_pfn_accessed(spte_to_pfn(old_spte)); |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 920 | } |
Xiao Guangrong | 6e7d035 | 2012-06-20 15:58:33 +0800 | [diff] [blame] | 921 | |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 922 | if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) { |
| 923 | flush = true; |
| 924 | kvm_set_pfn_dirty(spte_to_pfn(old_spte)); |
| 925 | } |
| 926 | |
| 927 | return flush; |
Avi Kivity | b79b93f | 2010-06-06 15:46:44 +0300 | [diff] [blame] | 928 | } |
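| | /*
| |  * Usage sketch (illustrative, not a verbatim caller): callers that batch
| |  * several spte updates typically accumulate the return value and flush
| |  * once at the end, e.g.
| |  *
| |  *	flush |= mmu_spte_update(sptep, new_spte);
| |  *	...
| |  *	if (flush)
| |  *		kvm_flush_remote_tlbs(kvm);
| |  */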
| 929 | |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 930 | /* |
| 931 | * Rules for using mmu_spte_clear_track_bits: |
| 932 | * It sets the sptep from present to nonpresent and tracks the
| 933 | * state bits; it is used to clear a last-level sptep.
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 934 | * Returns non-zero if the PTE was previously valid. |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 935 | */ |
| 936 | static int mmu_spte_clear_track_bits(u64 *sptep) |
| 937 | { |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 938 | kvm_pfn_t pfn; |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 939 | u64 old_spte = *sptep; |
| 940 | |
| 941 | if (!spte_has_volatile_bits(old_spte)) |
Xiao Guangrong | 603e065 | 2011-07-12 03:31:28 +0800 | [diff] [blame] | 942 | __update_clear_spte_fast(sptep, 0ull); |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 943 | else |
Xiao Guangrong | 603e065 | 2011-07-12 03:31:28 +0800 | [diff] [blame] | 944 | old_spte = __update_clear_spte_slow(sptep, 0ull); |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 945 | |
Takuya Yoshikawa | afd28fe | 2015-11-20 17:44:55 +0900 | [diff] [blame] | 946 | if (!is_shadow_present_pte(old_spte)) |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 947 | return 0; |
| 948 | |
| 949 | pfn = spte_to_pfn(old_spte); |
Xiao Guangrong | 86fde74 | 2012-07-17 21:52:52 +0800 | [diff] [blame] | 950 | |
| 951 | /* |
| 952 | * KVM does not hold a refcount on the pages used by the
| 953 | * kvm mmu, so before reclaiming such a page we should
| 954 | * unmap it from the mmu first.
| 955 | */ |
Ard Biesheuvel | bf4bea8 | 2014-11-10 08:33:56 +0000 | [diff] [blame] | 956 | WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn))); |
Xiao Guangrong | 86fde74 | 2012-07-17 21:52:52 +0800 | [diff] [blame] | 957 | |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 958 | if (is_accessed_spte(old_spte)) |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 959 | kvm_set_pfn_accessed(pfn); |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 960 | |
| 961 | if (is_dirty_spte(old_spte)) |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 962 | kvm_set_pfn_dirty(pfn); |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 963 | |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 964 | return 1; |
| 965 | } |
| 966 | |
| 967 | /* |
| 968 | * Rules for using mmu_spte_clear_no_track: |
| 969 | * Directly clear the spte without tracking the state bits of the sptep;
| 970 | * it is used to clear upper-level (non-last-level) sptes.
| 971 | */ |
| 972 | static void mmu_spte_clear_no_track(u64 *sptep) |
| 973 | { |
Xiao Guangrong | 603e065 | 2011-07-12 03:31:28 +0800 | [diff] [blame] | 974 | __update_clear_spte_fast(sptep, 0ull); |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 975 | } |
| 976 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 977 | static u64 mmu_spte_get_lockless(u64 *sptep) |
| 978 | { |
| 979 | return __get_spte_lockless(sptep); |
| 980 | } |
| 981 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 982 | static u64 mark_spte_for_access_track(u64 spte) |
| 983 | { |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 984 | if (spte_ad_enabled(spte)) |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 985 | return spte & ~shadow_accessed_mask; |
| 986 | |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 987 | if (is_access_track_spte(spte)) |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 988 | return spte; |
| 989 | |
| 990 | /* |
Junaid Shahid | 20d6523 | 2016-12-21 20:29:31 -0800 | [diff] [blame] | 991 | * Making an Access Tracking PTE will result in removal of write access |
| 992 | * from the PTE. So, verify that we will be able to restore the write |
| 993 | * access in the fast page fault path later on. |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 994 | */ |
| 995 | WARN_ONCE((spte & PT_WRITABLE_MASK) && |
| 996 | !spte_can_locklessly_be_made_writable(spte), |
| 997 | "kvm: Writable SPTE is not locklessly dirty-trackable\n"); |
| 998 | |
| 999 | WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask << |
| 1000 | shadow_acc_track_saved_bits_shift), |
| 1001 | "kvm: Access Tracking saved bit locations are not zero\n"); |
| 1002 | |
| 1003 | spte |= (spte & shadow_acc_track_saved_bits_mask) << |
| 1004 | shadow_acc_track_saved_bits_shift; |
| 1005 | spte &= ~shadow_acc_track_mask; |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1006 | |
| 1007 | return spte; |
| 1008 | } |
| 1009 | |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 1010 | /* Restore an acc-track PTE back to a regular PTE */ |
| 1011 | static u64 restore_acc_track_spte(u64 spte) |
| 1012 | { |
| 1013 | u64 new_spte = spte; |
| 1014 | u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift) |
| 1015 | & shadow_acc_track_saved_bits_mask; |
| 1016 | |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1017 | WARN_ON_ONCE(spte_ad_enabled(spte)); |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 1018 | WARN_ON_ONCE(!is_access_track_spte(spte)); |
| 1019 | |
| 1020 | new_spte &= ~shadow_acc_track_mask; |
| 1021 | new_spte &= ~(shadow_acc_track_saved_bits_mask << |
| 1022 | shadow_acc_track_saved_bits_shift); |
| 1023 | new_spte |= saved_bits; |
| 1024 | |
| 1025 | return new_spte; |
| 1026 | } |
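| | /*
| |  * Summary of the two helpers above (descriptive note): marking saves the
| |  * bits under shadow_acc_track_saved_bits_mask into the spare high bits at
| |  * shadow_acc_track_saved_bits_shift and clears shadow_acc_track_mask, so
| |  * that a later guest access faults and can be detected; restoring shifts
| |  * the saved bits back down and clears the saved copy, undoing the
| |  * transformation.
| |  */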
| 1027 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1028 | /* Returns the Accessed status of the PTE and resets it at the same time. */ |
| 1029 | static bool mmu_spte_age(u64 *sptep) |
| 1030 | { |
| 1031 | u64 spte = mmu_spte_get_lockless(sptep); |
| 1032 | |
| 1033 | if (!is_accessed_spte(spte)) |
| 1034 | return false; |
| 1035 | |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1036 | if (spte_ad_enabled(spte)) { |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1037 | clear_bit((ffs(shadow_accessed_mask) - 1), |
| 1038 | (unsigned long *)sptep); |
| 1039 | } else { |
| 1040 | /* |
| 1041 | * Capture the dirty status of the page, so that it doesn't get |
| 1042 | * lost when the SPTE is marked for access tracking. |
| 1043 | */ |
| 1044 | if (is_writable_pte(spte)) |
| 1045 | kvm_set_pfn_dirty(spte_to_pfn(spte)); |
| 1046 | |
| 1047 | spte = mark_spte_for_access_track(spte); |
| 1048 | mmu_spte_update_no_track(sptep, spte); |
| 1049 | } |
| 1050 | |
| 1051 | return true; |
| 1052 | } |
| 1053 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 1054 | static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) |
| 1055 | { |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 1056 | /* |
| 1057 | * Prevent page table teardown by making anyone freeing page tables wait
| 1058 | * during the kvm_flush_remote_tlbs() IPI to all active vcpus.
| 1059 | */ |
| 1060 | local_irq_disable(); |
Lan Tianyu | 36ca7e0 | 2016-03-13 11:10:25 +0800 | [diff] [blame] | 1061 | |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 1062 | /* |
| 1063 | * Make sure a following spte read is not reordered ahead of the write |
| 1064 | * to vcpu->mode. |
| 1065 | */ |
Lan Tianyu | 36ca7e0 | 2016-03-13 11:10:25 +0800 | [diff] [blame] | 1066 | smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES); |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 1067 | } |
| 1068 | |
| 1069 | static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) |
| 1070 | { |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 1071 | /* |
| 1072 | * Make sure the write to vcpu->mode is not reordered in front of |
Tianyu Lan | 9a98458 | 2018-09-07 05:45:02 +0000 | [diff] [blame] | 1073 | * reads to sptes. If it is, kvm_mmu_commit_zap_page() can see us
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 1074 | * OUTSIDE_GUEST_MODE and proceed to free the shadow page table. |
| 1075 | */ |
Lan Tianyu | 36ca7e0 | 2016-03-13 11:10:25 +0800 | [diff] [blame] | 1076 | smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 1077 | local_irq_enable(); |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 1078 | } |
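| | /*
| |  * Pairing sketch (illustrative): a lockless walker brackets its spte reads
| |  * with the two helpers above, e.g.
| |  *
| |  *	walk_shadow_page_lockless_begin(vcpu);
| |  *	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
| |  *		... read-only inspection of sptes ...
| |  *	walk_shadow_page_lockless_end(vcpu);
| |  */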
| 1079 | |
Sean Christopherson | 378f5cd | 2020-07-02 19:35:36 -0700 | [diff] [blame] | 1080 | static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect) |
Avi Kivity | 8c43850 | 2007-04-16 11:53:17 +0300 | [diff] [blame] | 1081 | { |
| 1082 | int r; |
| 1083 | |
Sean Christopherson | 531281a | 2020-07-02 19:35:32 -0700 | [diff] [blame] | 1084 | /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */ |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 1085 | r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, |
| 1086 | 1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM); |
Avi Kivity | 2e3e588 | 2007-09-10 11:28:17 +0300 | [diff] [blame] | 1087 | if (r) |
Sean Christopherson | 284aa86 | 2020-07-02 19:35:28 -0700 | [diff] [blame] | 1088 | return r; |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 1089 | r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache, |
| 1090 | PT64_ROOT_MAX_LEVEL); |
Avi Kivity | 2e3e588 | 2007-09-10 11:28:17 +0300 | [diff] [blame] | 1091 | if (r) |
Sean Christopherson | 171a90d | 2020-07-02 19:35:33 -0700 | [diff] [blame] | 1092 | return r; |
Sean Christopherson | 378f5cd | 2020-07-02 19:35:36 -0700 | [diff] [blame] | 1093 | if (maybe_indirect) { |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 1094 | r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache, |
| 1095 | PT64_ROOT_MAX_LEVEL); |
Sean Christopherson | 378f5cd | 2020-07-02 19:35:36 -0700 | [diff] [blame] | 1096 | if (r) |
| 1097 | return r; |
| 1098 | } |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 1099 | return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, |
| 1100 | PT64_ROOT_MAX_LEVEL); |
Avi Kivity | 8c43850 | 2007-04-16 11:53:17 +0300 | [diff] [blame] | 1101 | } |
| 1102 | |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1103 | static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) |
| 1104 | { |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 1105 | kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache); |
| 1106 | kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache); |
| 1107 | kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache); |
| 1108 | kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache); |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1109 | } |
| 1110 | |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1111 | static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu) |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1112 | { |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 1113 | return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1114 | } |
| 1115 | |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1116 | static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc) |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1117 | { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1118 | kmem_cache_free(pte_list_desc_cache, pte_list_desc); |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 1119 | } |
| 1120 | |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1121 | static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) |
| 1122 | { |
| 1123 | if (!sp->role.direct) |
| 1124 | return sp->gfns[index]; |
| 1125 | |
| 1126 | return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); |
| 1127 | } |
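| | /*
| |  * Worked example for the direct case above: a direct shadow page with
| |  * role.level == 2 and sp->gfn == 0x1000 covers a 2MB-aligned region, so
| |  * index 3 yields gfn 0x1000 + (3 << 9) = 0x1600.
| |  */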
| 1128 | |
| 1129 | static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) |
| 1130 | { |
Paolo Bonzini | e9f2a76 | 2019-06-30 08:36:21 -0400 | [diff] [blame] | 1131 | if (!sp->role.direct) { |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1132 | sp->gfns[index] = gfn; |
Paolo Bonzini | e9f2a76 | 2019-06-30 08:36:21 -0400 | [diff] [blame] | 1133 | return; |
| 1134 | } |
| 1135 | |
| 1136 | if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index))) |
| 1137 | pr_err_ratelimited("gfn mismatch under direct page %llx " |
| 1138 | "(expected %llx, got %llx)\n", |
| 1139 | sp->gfn, |
| 1140 | kvm_mmu_page_get_gfn(sp, index), gfn); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1141 | } |
| 1142 | |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1143 | /* |
Takuya Yoshikawa | d4dbf47 | 2010-12-07 12:59:07 +0900 | [diff] [blame] | 1144 | * Return the pointer to the large page information for a given gfn, |
| 1145 | * handling slots that are not large page aligned. |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1146 | */ |
Takuya Yoshikawa | d4dbf47 | 2010-12-07 12:59:07 +0900 | [diff] [blame] | 1147 | static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, |
| 1148 | struct kvm_memory_slot *slot, |
| 1149 | int level) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1150 | { |
| 1151 | unsigned long idx; |
| 1152 | |
Takuya Yoshikawa | fb03cb6 | 2012-02-08 12:59:10 +0900 | [diff] [blame] | 1153 | idx = gfn_to_index(gfn, slot->base_gfn, level); |
Takuya Yoshikawa | db3fe4e | 2012-02-08 13:02:18 +0900 | [diff] [blame] | 1154 | return &slot->arch.lpage_info[level - 2][idx]; |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1155 | } |
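| | /*
| |  * For example: level PG_LEVEL_2M maps to lpage_info[0] and PG_LEVEL_1G to
| |  * lpage_info[1]; gfn_to_index() returns the large-page index relative to
| |  * the (possibly unaligned) slot->base_gfn.
| |  */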
| 1156 | |
Xiao Guangrong | 547ffae | 2016-02-24 17:51:07 +0800 | [diff] [blame] | 1157 | static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot, |
| 1158 | gfn_t gfn, int count) |
| 1159 | { |
| 1160 | struct kvm_lpage_info *linfo; |
| 1161 | int i; |
| 1162 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1163 | for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) { |
Xiao Guangrong | 547ffae | 2016-02-24 17:51:07 +0800 | [diff] [blame] | 1164 | linfo = lpage_info_slot(gfn, slot, i); |
| 1165 | linfo->disallow_lpage += count; |
| 1166 | WARN_ON(linfo->disallow_lpage < 0); |
| 1167 | } |
| 1168 | } |
| 1169 | |
| 1170 | void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) |
| 1171 | { |
| 1172 | update_gfn_disallow_lpage_count(slot, gfn, 1); |
| 1173 | } |
| 1174 | |
| 1175 | void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) |
| 1176 | { |
| 1177 | update_gfn_disallow_lpage_count(slot, gfn, -1); |
| 1178 | } |
| 1179 | |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1180 | static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1181 | { |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1182 | struct kvm_memslots *slots; |
Joerg Roedel | d25797b | 2009-07-27 16:30:43 +0200 | [diff] [blame] | 1183 | struct kvm_memory_slot *slot; |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1184 | gfn_t gfn; |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1185 | |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 1186 | kvm->arch.indirect_shadow_pages++; |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1187 | gfn = sp->gfn; |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1188 | slots = kvm_memslots_for_spte_role(kvm, sp->role); |
| 1189 | slot = __gfn_to_memslot(slots, gfn); |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 1190 | |
| 1191 | /* non-leaf shadow pages are kept read-only. */
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1192 | if (sp->role.level > PG_LEVEL_4K) |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 1193 | return kvm_slot_page_track_add_page(kvm, slot, gfn, |
| 1194 | KVM_PAGE_TRACK_WRITE); |
| 1195 | |
Xiao Guangrong | 547ffae | 2016-02-24 17:51:07 +0800 | [diff] [blame] | 1196 | kvm_mmu_gfn_disallow_lpage(slot, gfn); |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1197 | } |
| 1198 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 1199 | static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 1200 | { |
| 1201 | if (sp->lpage_disallowed) |
| 1202 | return; |
| 1203 | |
| 1204 | ++kvm->stat.nx_lpage_splits; |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 1205 | list_add_tail(&sp->lpage_disallowed_link, |
| 1206 | &kvm->arch.lpage_disallowed_mmu_pages); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 1207 | sp->lpage_disallowed = true; |
| 1208 | } |
| 1209 | |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1210 | static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1211 | { |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1212 | struct kvm_memslots *slots; |
Joerg Roedel | d25797b | 2009-07-27 16:30:43 +0200 | [diff] [blame] | 1213 | struct kvm_memory_slot *slot; |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1214 | gfn_t gfn; |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1215 | |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 1216 | kvm->arch.indirect_shadow_pages--; |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 1217 | gfn = sp->gfn; |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1218 | slots = kvm_memslots_for_spte_role(kvm, sp->role); |
| 1219 | slot = __gfn_to_memslot(slots, gfn); |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1220 | if (sp->role.level > PG_LEVEL_4K) |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 1221 | return kvm_slot_page_track_remove_page(kvm, slot, gfn, |
| 1222 | KVM_PAGE_TRACK_WRITE); |
| 1223 | |
Xiao Guangrong | 547ffae | 2016-02-24 17:51:07 +0800 | [diff] [blame] | 1224 | kvm_mmu_gfn_allow_lpage(slot, gfn); |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1225 | } |
| 1226 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 1227 | static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 1228 | { |
| 1229 | --kvm->stat.nx_lpage_splits; |
| 1230 | sp->lpage_disallowed = false; |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 1231 | list_del(&sp->lpage_disallowed_link); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 1232 | } |
| 1233 | |
Xiao Guangrong | 5d163b1 | 2011-03-09 15:43:00 +0800 | [diff] [blame] | 1234 | static struct kvm_memory_slot * |
| 1235 | gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 1236 | bool no_dirty_log) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1237 | { |
| 1238 | struct kvm_memory_slot *slot; |
Xiao Guangrong | 5d163b1 | 2011-03-09 15:43:00 +0800 | [diff] [blame] | 1239 | |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 1240 | slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
Paolo Bonzini | 91b0d26 | 2020-01-21 16:16:32 +0100 | [diff] [blame] | 1241 | if (!slot || slot->flags & KVM_MEMSLOT_INVALID) |
| 1242 | return NULL; |
| 1243 | if (no_dirty_log && slot->dirty_bitmap) |
| 1244 | return NULL; |
Xiao Guangrong | 5d163b1 | 2011-03-09 15:43:00 +0800 | [diff] [blame] | 1245 | |
| 1246 | return slot; |
| 1247 | } |
| 1248 | |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1249 | /* |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1250 | * About rmap_head encoding: |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1251 | * |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1252 | * If bit zero of rmap_head->val is clear, then it points to the only spte
| 1253 | * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1254 | * pte_list_desc containing more mappings. |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1255 | */ |
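| | /*
| |  * Concrete illustration of the encoding: with a single mapping,
| |  * rmap_head->val == (unsigned long)sptep and bit zero is naturally clear
| |  * because sptes are 8-byte aligned; with multiple mappings,
| |  * rmap_head->val == (unsigned long)desc | 1 and the sptes live in
| |  * desc->sptes[].
| |  */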
| 1256 | |
| 1257 | /* |
| 1258 | * Returns the number of pointers in the rmap chain, not counting the new one. |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1259 | */ |
| 1260 | static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1261 | struct kvm_rmap_head *rmap_head) |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1262 | { |
| 1263 | struct pte_list_desc *desc; |
| 1264 | int i, count = 0; |
| 1265 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1266 | if (!rmap_head->val) { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1267 | rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1268 | rmap_head->val = (unsigned long)spte; |
| 1269 | } else if (!(rmap_head->val & 1)) { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1270 | rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); |
| 1271 | desc = mmu_alloc_pte_list_desc(vcpu); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1272 | desc->sptes[0] = (u64 *)rmap_head->val; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1273 | desc->sptes[1] = spte; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1274 | rmap_head->val = (unsigned long)desc | 1; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1275 | ++count; |
| 1276 | } else { |
| 1277 | rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1278 | desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1279 | while (desc->sptes[PTE_LIST_EXT-1] && desc->more) { |
| 1280 | desc = desc->more; |
| 1281 | count += PTE_LIST_EXT; |
| 1282 | } |
| 1283 | if (desc->sptes[PTE_LIST_EXT-1]) { |
| 1284 | desc->more = mmu_alloc_pte_list_desc(vcpu); |
| 1285 | desc = desc->more; |
| 1286 | } |
| 1287 | for (i = 0; desc->sptes[i]; ++i) |
| 1288 | ++count; |
| 1289 | desc->sptes[i] = spte; |
| 1290 | } |
| 1291 | return count; |
| 1292 | } |
| 1293 | |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1294 | static void |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1295 | pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, |
| 1296 | struct pte_list_desc *desc, int i, |
| 1297 | struct pte_list_desc *prev_desc) |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1298 | { |
| 1299 | int j; |
| 1300 | |
| 1301 | for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j) |
| 1302 | ; |
| 1303 | desc->sptes[i] = desc->sptes[j]; |
| 1304 | desc->sptes[j] = NULL; |
| 1305 | if (j != 0) |
| 1306 | return; |
| 1307 | if (!prev_desc && !desc->more) |
Miaohe Lin | fe3c2b4 | 2019-12-05 11:40:16 +0800 | [diff] [blame] | 1308 | rmap_head->val = 0; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1309 | else |
| 1310 | if (prev_desc) |
| 1311 | prev_desc->more = desc->more; |
| 1312 | else |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1313 | rmap_head->val = (unsigned long)desc->more | 1; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1314 | mmu_free_pte_list_desc(desc); |
| 1315 | } |
| 1316 | |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1317 | static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1318 | { |
| 1319 | struct pte_list_desc *desc; |
| 1320 | struct pte_list_desc *prev_desc; |
| 1321 | int i; |
| 1322 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1323 | if (!rmap_head->val) { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1324 | pr_err("%s: %p 0->BUG\n", __func__, spte); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1325 | BUG(); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1326 | } else if (!(rmap_head->val & 1)) { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1327 | rmap_printk("%s: %p 1->0\n", __func__, spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1328 | if ((u64 *)rmap_head->val != spte) { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1329 | pr_err("%s: %p 1->BUG\n", __func__, spte); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1330 | BUG(); |
| 1331 | } |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1332 | rmap_head->val = 0; |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1333 | } else { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1334 | rmap_printk("%s: %p many->many\n", __func__, spte); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1335 | desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1336 | prev_desc = NULL; |
| 1337 | while (desc) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1338 | for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1339 | if (desc->sptes[i] == spte) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1340 | pte_list_desc_remove_entry(rmap_head, |
| 1341 | desc, i, prev_desc); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1342 | return; |
| 1343 | } |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1344 | } |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1345 | prev_desc = desc; |
| 1346 | desc = desc->more; |
| 1347 | } |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1348 | pr_err("%s: %p many->many\n", __func__, spte); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 1349 | BUG(); |
| 1350 | } |
| 1351 | } |
| 1352 | |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 1353 | static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep) |
| 1354 | { |
| 1355 | mmu_spte_clear_track_bits(sptep); |
| 1356 | __pte_list_remove(sptep, rmap_head); |
| 1357 | } |
| 1358 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1359 | static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level, |
| 1360 | struct kvm_memory_slot *slot) |
Takuya Yoshikawa | 9b9b149 | 2011-11-14 18:22:28 +0900 | [diff] [blame] | 1361 | { |
Takuya Yoshikawa | 77d1130 | 2012-07-02 17:57:17 +0900 | [diff] [blame] | 1362 | unsigned long idx; |
Takuya Yoshikawa | 9b9b149 | 2011-11-14 18:22:28 +0900 | [diff] [blame] | 1363 | |
Takuya Yoshikawa | 77d1130 | 2012-07-02 17:57:17 +0900 | [diff] [blame] | 1364 | idx = gfn_to_index(gfn, slot->base_gfn, level); |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1365 | return &slot->arch.rmap[level - PG_LEVEL_4K][idx]; |
Takuya Yoshikawa | 9b9b149 | 2011-11-14 18:22:28 +0900 | [diff] [blame] | 1366 | } |
| 1367 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1368 | static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, |
| 1369 | struct kvm_mmu_page *sp) |
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 1370 | { |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1371 | struct kvm_memslots *slots; |
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 1372 | struct kvm_memory_slot *slot; |
| 1373 | |
Paolo Bonzini | 699023e | 2015-05-18 15:03:39 +0200 | [diff] [blame] | 1374 | slots = kvm_memslots_for_spte_role(kvm, sp->role); |
| 1375 | slot = __gfn_to_memslot(slots, gfn); |
Paolo Bonzini | e4cd1da | 2015-05-18 15:11:46 +0200 | [diff] [blame] | 1376 | return __gfn_to_rmap(gfn, sp->role.level, slot); |
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 1377 | } |
| 1378 | |
Xiao Guangrong | f759e2b | 2011-09-22 16:53:17 +0800 | [diff] [blame] | 1379 | static bool rmap_can_add(struct kvm_vcpu *vcpu) |
| 1380 | { |
Sean Christopherson | 356ec69 | 2020-07-02 19:35:27 -0700 | [diff] [blame] | 1381 | struct kvm_mmu_memory_cache *mc; |
Xiao Guangrong | f759e2b | 2011-09-22 16:53:17 +0800 | [diff] [blame] | 1382 | |
Sean Christopherson | 356ec69 | 2020-07-02 19:35:27 -0700 | [diff] [blame] | 1383 | mc = &vcpu->arch.mmu_pte_list_desc_cache; |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 1384 | return kvm_mmu_memory_cache_nr_free_objects(mc); |
Xiao Guangrong | f759e2b | 2011-09-22 16:53:17 +0800 | [diff] [blame] | 1385 | } |
| 1386 | |
Joerg Roedel | 44ad994 | 2009-07-27 16:30:42 +0200 | [diff] [blame] | 1387 | static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1388 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1389 | struct kvm_mmu_page *sp; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1390 | struct kvm_rmap_head *rmap_head; |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1391 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 1392 | sp = sptep_to_sp(spte); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1393 | kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1394 | rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); |
| 1395 | return pte_list_add(vcpu, spte, rmap_head); |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1396 | } |
| 1397 | |
Izik Eidus | 290fc38 | 2007-09-27 14:11:22 +0200 | [diff] [blame] | 1398 | static void rmap_remove(struct kvm *kvm, u64 *spte) |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1399 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1400 | struct kvm_mmu_page *sp; |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1401 | gfn_t gfn; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1402 | struct kvm_rmap_head *rmap_head; |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1403 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 1404 | sp = sptep_to_sp(spte); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1405 | gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1406 | rmap_head = gfn_to_rmap(kvm, gfn, sp); |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 1407 | __pte_list_remove(spte, rmap_head); |
Avi Kivity | cd4a4e5 | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1408 | } |
| 1409 | |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1410 | /* |
| 1411 | * Used by the following functions to iterate through the sptes linked by a |
| 1412 | * rmap. All fields are private and not meant to be used outside.
| 1413 | */ |
| 1414 | struct rmap_iterator { |
| 1415 | /* private fields */ |
| 1416 | struct pte_list_desc *desc; /* holds the sptep if not NULL */ |
| 1417 | int pos; /* index of the sptep */ |
| 1418 | }; |
| 1419 | |
| 1420 | /* |
| 1421 | * Iteration must be started by this function. This should also be used after |
| 1422 | * removing/dropping sptes from the rmap link because in such cases the |
Miaohe Lin | 0a03cbd | 2019-12-06 16:20:18 +0800 | [diff] [blame] | 1423 | * information in the iterator may not be valid. |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1424 | * |
| 1425 | * Returns sptep if found, NULL otherwise. |
| 1426 | */ |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1427 | static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head, |
| 1428 | struct rmap_iterator *iter) |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1429 | { |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1430 | u64 *sptep; |
| 1431 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1432 | if (!rmap_head->val) |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1433 | return NULL; |
| 1434 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1435 | if (!(rmap_head->val & 1)) { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1436 | iter->desc = NULL; |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1437 | sptep = (u64 *)rmap_head->val; |
| 1438 | goto out; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1439 | } |
| 1440 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1441 | iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1442 | iter->pos = 0; |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1443 | sptep = iter->desc->sptes[iter->pos]; |
| 1444 | out: |
| 1445 | BUG_ON(!is_shadow_present_pte(*sptep)); |
| 1446 | return sptep; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1447 | } |
| 1448 | |
| 1449 | /* |
| 1450 | * Must be used with a valid iterator: e.g. after rmap_get_first(). |
| 1451 | * |
| 1452 | * Returns sptep if found, NULL otherwise. |
| 1453 | */ |
| 1454 | static u64 *rmap_get_next(struct rmap_iterator *iter) |
| 1455 | { |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1456 | u64 *sptep; |
| 1457 | |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1458 | if (iter->desc) { |
| 1459 | if (iter->pos < PTE_LIST_EXT - 1) { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1460 | ++iter->pos; |
| 1461 | sptep = iter->desc->sptes[iter->pos]; |
| 1462 | if (sptep) |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1463 | goto out; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1464 | } |
| 1465 | |
| 1466 | iter->desc = iter->desc->more; |
| 1467 | |
| 1468 | if (iter->desc) { |
| 1469 | iter->pos = 0; |
| 1470 | /* desc->sptes[0] cannot be NULL */ |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1471 | sptep = iter->desc->sptes[iter->pos]; |
| 1472 | goto out; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1473 | } |
| 1474 | } |
| 1475 | |
| 1476 | return NULL; |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1477 | out: |
| 1478 | BUG_ON(!is_shadow_present_pte(*sptep)); |
| 1479 | return sptep; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1480 | } |
| 1481 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1482 | #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \ |
| 1483 | for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \ |
Takuya Yoshikawa | 77fbbbd | 2015-11-20 17:45:44 +0900 | [diff] [blame] | 1484 | _spte_; _spte_ = rmap_get_next(_iter_)) |
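| | /*
| |  * Usage sketch: the typical pattern is to accumulate a flush flag while
| |  * walking every spte that maps the gfn, as __rmap_write_protect() below
| |  * does:
| |  *
| |  *	for_each_rmap_spte(rmap_head, &iter, sptep)
| |  *		flush |= spte_write_protect(sptep, pt_protect);
| |  */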
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1485 | |
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 1486 | static void drop_spte(struct kvm *kvm, u64 *sptep) |
Xiao Guangrong | e4b502e | 2010-07-16 11:28:09 +0800 | [diff] [blame] | 1487 | { |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 1488 | if (mmu_spte_clear_track_bits(sptep)) |
Marcelo Tosatti | eb45fda | 2010-10-25 11:58:22 -0200 | [diff] [blame] | 1489 | rmap_remove(kvm, sptep); |
Avi Kivity | be38d27 | 2010-06-06 14:31:27 +0300 | [diff] [blame] | 1490 | } |
| 1491 | |
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1492 | |
| 1493 | static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) |
| 1494 | { |
| 1495 | if (is_large_pte(*sptep)) { |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 1496 | WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K); |
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1497 | drop_spte(kvm, sptep); |
| 1498 | --kvm->stat.lpages; |
| 1499 | return true; |
| 1500 | } |
| 1501 | |
| 1502 | return false; |
| 1503 | } |
| 1504 | |
| 1505 | static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) |
| 1506 | { |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 1507 | if (__drop_large_spte(vcpu->kvm, sptep)) { |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 1508 | struct kvm_mmu_page *sp = sptep_to_sp(sptep); |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 1509 | |
| 1510 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, |
| 1511 | KVM_PAGES_PER_HPAGE(sp->role.level)); |
| 1512 | } |
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1513 | } |
| 1514 | |
| 1515 | /* |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1516 | * Write-protect the specified @sptep; @pt_protect indicates whether the
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 1517 | * write-protection is caused by protecting a shadow page table.
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1518 | *
Tiejun Chen | b461966 | 2014-09-22 10:31:38 +0800 | [diff] [blame] | 1519 | * Note: write protection differs between dirty logging and spte
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1520 | * protection:
| 1521 | * - for dirty logging, the spte can be made writable at any time if
| 1522 | * its dirty bitmap is properly set.
| 1523 | * - for spte protection, the spte can be made writable only after the
| 1524 | * shadow page is unsync-ed.
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1525 | *
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 1526 | * Return true if the TLB needs to be flushed.
Xiao Guangrong | 8e22f95 | 2012-06-20 15:57:39 +0800 | [diff] [blame] | 1527 | */ |
Bandan Das | c4f138b | 2016-08-02 16:32:37 -0400 | [diff] [blame] | 1528 | static bool spte_write_protect(u64 *sptep, bool pt_protect) |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1529 | { |
| 1530 | u64 spte = *sptep; |
| 1531 | |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1532 | if (!is_writable_pte(spte) && |
Junaid Shahid | ea4114b | 2016-12-06 16:46:11 -0800 | [diff] [blame] | 1533 | !(pt_protect && spte_can_locklessly_be_made_writable(spte))) |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1534 | return false; |
| 1535 | |
| 1536 | rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); |
| 1537 | |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1538 | if (pt_protect) |
| 1539 | spte &= ~SPTE_MMU_WRITEABLE; |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1540 | spte = spte & ~PT_WRITABLE_MASK; |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 1541 | |
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 1542 | return mmu_spte_update(sptep, spte); |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1543 | } |
| 1544 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1545 | static bool __rmap_write_protect(struct kvm *kvm, |
| 1546 | struct kvm_rmap_head *rmap_head, |
Takuya Yoshikawa | 245c391 | 2013-01-08 19:44:09 +0900 | [diff] [blame] | 1547 | bool pt_protect) |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1548 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1549 | u64 *sptep; |
| 1550 | struct rmap_iterator iter; |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1551 | bool flush = false; |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1552 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1553 | for_each_rmap_spte(rmap_head, &iter, sptep) |
Bandan Das | c4f138b | 2016-08-02 16:32:37 -0400 | [diff] [blame] | 1554 | flush |= spte_write_protect(sptep, pt_protect); |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1555 | |
Xiao Guangrong | d13bc5b | 2012-06-20 15:57:15 +0800 | [diff] [blame] | 1556 | return flush; |
Takuya Yoshikawa | a0ed460 | 2012-03-01 19:31:22 +0900 | [diff] [blame] | 1557 | } |
| 1558 | |
Bandan Das | c4f138b | 2016-08-02 16:32:37 -0400 | [diff] [blame] | 1559 | static bool spte_clear_dirty(u64 *sptep) |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1560 | { |
| 1561 | u64 spte = *sptep; |
| 1562 | |
| 1563 | rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); |
| 1564 | |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1565 | MMU_WARN_ON(!spte_ad_enabled(spte)); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1566 | spte &= ~shadow_dirty_mask; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1567 | return mmu_spte_update(sptep, spte); |
| 1568 | } |
| 1569 | |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1570 | static bool spte_wrprot_for_clear_dirty(u64 *sptep) |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1571 | { |
| 1572 | bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT, |
| 1573 | (unsigned long *)sptep); |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1574 | if (was_writable && !spte_ad_enabled(*sptep)) |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1575 | kvm_set_pfn_dirty(spte_to_pfn(*sptep)); |
| 1576 | |
| 1577 | return was_writable; |
| 1578 | } |
| 1579 | |
| 1580 | /* |
| 1581 | * Gets the GFN ready for another round of dirty logging by clearing the |
| 1582 | * - D bit on ad-enabled SPTEs, and |
| 1583 | * - W bit on ad-disabled SPTEs. |
| 1584 | * Returns true iff any D or W bits were cleared. |
| 1585 | */ |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1586 | static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1587 | { |
| 1588 | u64 *sptep; |
| 1589 | struct rmap_iterator iter; |
| 1590 | bool flush = false; |
| 1591 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1592 | for_each_rmap_spte(rmap_head, &iter, sptep) |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1593 | if (spte_ad_need_write_protect(*sptep)) |
| 1594 | flush |= spte_wrprot_for_clear_dirty(sptep); |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1595 | else |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1596 | flush |= spte_clear_dirty(sptep); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1597 | |
| 1598 | return flush; |
| 1599 | } |
| 1600 | |
Bandan Das | c4f138b | 2016-08-02 16:32:37 -0400 | [diff] [blame] | 1601 | static bool spte_set_dirty(u64 *sptep) |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1602 | { |
| 1603 | u64 spte = *sptep; |
| 1604 | |
| 1605 | rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); |
| 1606 | |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1607 | /* |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 1608 | * Similar to the !kvm_x86_ops.slot_disable_log_dirty case, |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 1609 | * do not bother adding back write access to pages marked |
| 1610 | * SPTE_AD_WRPROT_ONLY_MASK. |
| 1611 | */ |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1612 | spte |= shadow_dirty_mask; |
| 1613 | |
| 1614 | return mmu_spte_update(sptep, spte); |
| 1615 | } |
| 1616 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1617 | static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1618 | { |
| 1619 | u64 *sptep; |
| 1620 | struct rmap_iterator iter; |
| 1621 | bool flush = false; |
| 1622 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1623 | for_each_rmap_spte(rmap_head, &iter, sptep) |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1624 | if (spte_ad_enabled(*sptep)) |
| 1625 | flush |= spte_set_dirty(sptep); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1626 | |
| 1627 | return flush; |
| 1628 | } |
| 1629 | |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1630 | /** |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1631 | * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1632 | * @kvm: kvm instance |
| 1633 | * @slot: slot to protect |
| 1634 | * @gfn_offset: start of the BITS_PER_LONG pages we care about |
| 1635 | * @mask: indicates which pages we should protect |
| 1636 | * |
| 1637 | * Used when we do not need to care about huge page mappings: e.g. during dirty |
| 1638 | * logging we do not have any such mappings. |
| 1639 | */ |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1640 | static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1641 | struct kvm_memory_slot *slot, |
| 1642 | gfn_t gfn_offset, unsigned long mask) |
Izik Eidus | 98348e9 | 2007-10-16 14:42:30 +0200 | [diff] [blame] | 1643 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1644 | struct kvm_rmap_head *rmap_head; |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1645 | |
| 1646 | while (mask) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1647 | rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1648 | PG_LEVEL_4K, slot); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1649 | __rmap_write_protect(kvm, rmap_head, false); |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1650 | |
| 1651 | /* clear the first set bit */ |
| 1652 | mask &= mask - 1; |
| 1653 | } |
| 1654 | } |
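| | /*
| |  * Worked example for the loop above: with mask == 0b1010, the first pass
| |  * write-protects the rmap for slot->base_gfn + gfn_offset + 1 (__ffs picks
| |  * bit 1) and "mask &= mask - 1" drops that bit, so the second pass handles
| |  * slot->base_gfn + gfn_offset + 3, and the loop then ends.
| |  */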
| 1655 | |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1656 | /** |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 1657 | * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write |
| 1658 | * protect the page if the D-bit isn't supported. |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1659 | * @kvm: kvm instance |
| 1660 | * @slot: slot to clear D-bit |
| 1661 | * @gfn_offset: start of the BITS_PER_LONG pages we care about |
| 1662 | * @mask: indicates which pages we should clear the D-bit for
| 1663 | * |
| 1664 | * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap. |
| 1665 | */ |
| 1666 | void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, |
| 1667 | struct kvm_memory_slot *slot, |
| 1668 | gfn_t gfn_offset, unsigned long mask) |
| 1669 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1670 | struct kvm_rmap_head *rmap_head; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1671 | |
| 1672 | while (mask) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1673 | rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1674 | PG_LEVEL_4K, slot); |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1675 | __rmap_clear_dirty(kvm, rmap_head); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 1676 | |
| 1677 | /* clear the first set bit */ |
| 1678 | mask &= mask - 1; |
| 1679 | } |
| 1680 | } |
| 1681 | EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked); |
| 1682 | |
| 1683 | /** |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1684 | * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected |
| 1685 | * PT level pages. |
| 1686 | * |
| 1687 | * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to |
| 1688 | * enable dirty logging for them. |
| 1689 | * |
| 1690 | * Used when we do not need to care about huge page mappings: e.g. during dirty |
| 1691 | * logging we do not have any such mappings. |
| 1692 | */ |
| 1693 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, |
| 1694 | struct kvm_memory_slot *slot, |
| 1695 | gfn_t gfn_offset, unsigned long mask) |
| 1696 | { |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 1697 | if (kvm_x86_ops.enable_log_dirty_pt_masked) |
| 1698 | kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset, |
Kai Huang | 88178fd | 2015-01-28 10:54:27 +0800 | [diff] [blame] | 1699 | mask); |
| 1700 | else |
| 1701 | kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); |
Kai Huang | 3b0f1d0 | 2015-01-28 10:54:23 +0800 | [diff] [blame] | 1702 | } |
| 1703 | |
Xiao Guangrong | aeecee2 | 2016-02-24 17:51:08 +0800 | [diff] [blame] | 1704 | bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, |
| 1705 | struct kvm_memory_slot *slot, u64 gfn) |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1706 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1707 | struct kvm_rmap_head *rmap_head; |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1708 | int i; |
Xiao Guangrong | 2f84569 | 2012-06-20 15:56:53 +0800 | [diff] [blame] | 1709 | bool write_protected = false; |
Takuya Yoshikawa | 5dc99b23 | 2012-03-01 19:32:16 +0900 | [diff] [blame] | 1710 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1711 | for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1712 | rmap_head = __gfn_to_rmap(gfn, i, slot); |
Xiao Guangrong | aeecee2 | 2016-02-24 17:51:08 +0800 | [diff] [blame] | 1713 | write_protected |= __rmap_write_protect(kvm, rmap_head, true); |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1714 | } |
| 1715 | |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 1716 | return write_protected; |
Avi Kivity | 374cbac | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1717 | } |
| 1718 | |
Xiao Guangrong | aeecee2 | 2016-02-24 17:51:08 +0800 | [diff] [blame] | 1719 | static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) |
| 1720 | { |
| 1721 | struct kvm_memory_slot *slot; |
| 1722 | |
| 1723 | slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
| 1724 | return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn); |
| 1725 | } |
| 1726 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1727 | static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head) |
Xiao Guangrong | 6a49f85 | 2015-05-13 14:42:25 +0800 | [diff] [blame] | 1728 | { |
| 1729 | u64 *sptep; |
| 1730 | struct rmap_iterator iter; |
| 1731 | bool flush = false; |
| 1732 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1733 | while ((sptep = rmap_get_first(rmap_head, &iter))) { |
Xiao Guangrong | 6a49f85 | 2015-05-13 14:42:25 +0800 | [diff] [blame] | 1734 | rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); |
| 1735 | |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 1736 | pte_list_remove(rmap_head, sptep); |
Xiao Guangrong | 6a49f85 | 2015-05-13 14:42:25 +0800 | [diff] [blame] | 1737 | flush = true; |
| 1738 | } |
| 1739 | |
| 1740 | return flush; |
| 1741 | } |
| 1742 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1743 | static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1744 | struct kvm_memory_slot *slot, gfn_t gfn, int level, |
| 1745 | unsigned long data) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1746 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1747 | return kvm_zap_rmapp(kvm, rmap_head); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1748 | } |
| 1749 | |
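/*
 * rmap handler for the change_pte notifier (reached via kvm_set_spte_hva()
 * below); @data carries a pointer to the new host PTE.  For every spte that
 * maps the gfn: if the new host PTE is writable, the old spte is simply
 * zapped and re-established on the next fault; otherwise the spte is
 * rewritten in place to point at the new pfn, with write access stripped
 * and the entry marked for access tracking.
 */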
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1750 | static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1751 | struct kvm_memory_slot *slot, gfn_t gfn, int level, |
| 1752 | unsigned long data) |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1753 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1754 | u64 *sptep; |
| 1755 | struct rmap_iterator iter; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1756 | int need_flush = 0; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1757 | u64 new_spte; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1758 | pte_t *ptep = (pte_t *)data; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 1759 | kvm_pfn_t new_pfn; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1760 | |
| 1761 | WARN_ON(pte_huge(*ptep)); |
| 1762 | new_pfn = pte_pfn(*ptep); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1763 | |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1764 | restart: |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1765 | for_each_rmap_spte(rmap_head, &iter, sptep) { |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1766 | rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n", |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1767 | sptep, *sptep, gfn, level); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1768 | |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1769 | need_flush = 1; |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1770 | |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1771 | if (pte_write(*ptep)) { |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 1772 | pte_list_remove(rmap_head, sptep); |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1773 | goto restart; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1774 | } else { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1775 | new_spte = *sptep & ~PT64_BASE_ADDR_MASK; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1776 | new_spte |= (u64)new_pfn << PAGE_SHIFT; |
| 1777 | |
| 1778 | new_spte &= ~PT_WRITABLE_MASK; |
| 1779 | new_spte &= ~SPTE_HOST_WRITEABLE; |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1780 | |
| 1781 | new_spte = mark_spte_for_access_track(new_spte); |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1782 | |
| 1783 | mmu_spte_clear_track_bits(sptep); |
| 1784 | mmu_spte_set(sptep, new_spte); |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1785 | } |
| 1786 | } |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1787 | |
Lan Tianyu | 3cc5ea9 | 2018-12-06 21:21:12 +0800 | [diff] [blame] | 1788 | if (need_flush && kvm_available_flush_tlb_with_range()) { |
| 1789 | kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); |
| 1790 | return 0; |
| 1791 | } |
| 1792 | |
Lan Tianyu | 0cf853c | 2018-12-06 21:21:11 +0800 | [diff] [blame] | 1793 | return need_flush; |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1794 | } |
| 1795 | |
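/*
 * Iterator that visits every rmap head covering [start_gfn, end_gfn] in a
 * memslot, one page-table level at a time from start_level up to end_level.
 * Within a level, slot_rmap_walk_next() advances gfn by that level's
 * huge-page stride; once a level is exhausted the walk restarts at the next
 * level, and it terminates when ->rmap becomes NULL.  Used via
 * for_each_slot_rmap_range().
 */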
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1796 | struct slot_rmap_walk_iterator { |
| 1797 | /* input fields. */ |
| 1798 | struct kvm_memory_slot *slot; |
| 1799 | gfn_t start_gfn; |
| 1800 | gfn_t end_gfn; |
| 1801 | int start_level; |
| 1802 | int end_level; |
| 1803 | |
| 1804 | /* output fields. */ |
| 1805 | gfn_t gfn; |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1806 | struct kvm_rmap_head *rmap; |
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1807 | int level; |
| 1808 | |
| 1809 | /* private field. */ |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1810 | struct kvm_rmap_head *end_rmap; |
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1811 | }; |
| 1812 | |
| 1813 | static void |
| 1814 | rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level) |
| 1815 | { |
| 1816 | iterator->level = level; |
| 1817 | iterator->gfn = iterator->start_gfn; |
| 1818 | iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot); |
| 1819 | iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level, |
| 1820 | iterator->slot); |
| 1821 | } |
| 1822 | |
| 1823 | static void |
| 1824 | slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator, |
| 1825 | struct kvm_memory_slot *slot, int start_level, |
| 1826 | int end_level, gfn_t start_gfn, gfn_t end_gfn) |
| 1827 | { |
| 1828 | iterator->slot = slot; |
| 1829 | iterator->start_level = start_level; |
| 1830 | iterator->end_level = end_level; |
| 1831 | iterator->start_gfn = start_gfn; |
| 1832 | iterator->end_gfn = end_gfn; |
| 1833 | |
| 1834 | rmap_walk_init_level(iterator, iterator->start_level); |
| 1835 | } |
| 1836 | |
| 1837 | static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator) |
| 1838 | { |
| 1839 | return !!iterator->rmap; |
| 1840 | } |
| 1841 | |
| 1842 | static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) |
| 1843 | { |
| 1844 | if (++iterator->rmap <= iterator->end_rmap) { |
| 1845 | iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level)); |
| 1846 | return; |
| 1847 | } |
| 1848 | |
| 1849 | if (++iterator->level > iterator->end_level) { |
| 1850 | iterator->rmap = NULL; |
| 1851 | return; |
| 1852 | } |
| 1853 | |
| 1854 | rmap_walk_init_level(iterator, iterator->level); |
| 1855 | } |
| 1856 | |
| 1857 | #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \ |
| 1858 | _start_gfn, _end_gfn, _iter_) \ |
| 1859 | for (slot_rmap_walk_init(_iter_, _slot_, _start_level_, \ |
| 1860 | _end_level_, _start_gfn, _end_gfn); \ |
| 1861 | slot_rmap_walk_okay(_iter_); \ |
| 1862 | slot_rmap_walk_next(_iter_)) |
| 1863 | |
Takuya Yoshikawa | 84504ef | 2012-07-02 17:55:48 +0900 | [diff] [blame] | 1864 | static int kvm_handle_hva_range(struct kvm *kvm, |
| 1865 | unsigned long start, |
| 1866 | unsigned long end, |
| 1867 | unsigned long data, |
| 1868 | int (*handler)(struct kvm *kvm, |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1869 | struct kvm_rmap_head *rmap_head, |
Takuya Yoshikawa | 048212d | 2012-07-02 17:57:59 +0900 | [diff] [blame] | 1870 | struct kvm_memory_slot *slot, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1871 | gfn_t gfn, |
| 1872 | int level, |
Takuya Yoshikawa | 84504ef | 2012-07-02 17:55:48 +0900 | [diff] [blame] | 1873 | unsigned long data)) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1874 | { |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 1875 | struct kvm_memslots *slots; |
Xiao Guangrong | be6ba0f | 2011-11-24 17:39:18 +0800 | [diff] [blame] | 1876 | struct kvm_memory_slot *memslot; |
Xiao Guangrong | 6ce1f4e | 2015-05-13 14:42:22 +0800 | [diff] [blame] | 1877 | struct slot_rmap_walk_iterator iterator; |
| 1878 | int ret = 0; |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 1879 | int i; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1880 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 1881 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
| 1882 | slots = __kvm_memslots(kvm, i); |
| 1883 | kvm_for_each_memslot(memslot, slots) { |
| 1884 | unsigned long hva_start, hva_end; |
| 1885 | gfn_t gfn_start, gfn_end; |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 1886 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 1887 | hva_start = max(start, memslot->userspace_addr); |
| 1888 | hva_end = min(end, memslot->userspace_addr + |
| 1889 | (memslot->npages << PAGE_SHIFT)); |
| 1890 | if (hva_start >= hva_end) |
| 1891 | continue; |
| 1892 | /* |
| 1893 | * {gfn(page) | page intersects with [hva_start, hva_end)} = |
| 1894 | * {gfn_start, gfn_start+1, ..., gfn_end-1}. |
| 1895 | */ |
| 1896 | gfn_start = hva_to_gfn_memslot(hva_start, memslot); |
| 1897 | gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1898 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 1899 | for_each_slot_rmap_range(memslot, PG_LEVEL_4K, |
Sean Christopherson | e662ec3 | 2020-04-27 17:54:21 -0700 | [diff] [blame] | 1900 | KVM_MAX_HUGEPAGE_LEVEL, |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 1901 | gfn_start, gfn_end - 1, |
| 1902 | &iterator) |
| 1903 | ret |= handler(kvm, iterator.rmap, memslot, |
| 1904 | iterator.gfn, iterator.level, data); |
| 1905 | } |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1906 | } |
| 1907 | |
Takuya Yoshikawa | f395302 | 2012-07-02 17:58:48 +0900 | [diff] [blame] | 1908 | return ret; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1909 | } |
| 1910 | |
Takuya Yoshikawa | 84504ef | 2012-07-02 17:55:48 +0900 | [diff] [blame] | 1911 | static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, |
| 1912 | unsigned long data, |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1913 | int (*handler)(struct kvm *kvm, |
| 1914 | struct kvm_rmap_head *rmap_head, |
Takuya Yoshikawa | 048212d | 2012-07-02 17:57:59 +0900 | [diff] [blame] | 1915 | struct kvm_memory_slot *slot, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1916 | gfn_t gfn, int level, |
Takuya Yoshikawa | 84504ef | 2012-07-02 17:55:48 +0900 | [diff] [blame] | 1917 | unsigned long data)) |
| 1918 | { |
| 1919 | return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1920 | } |
| 1921 | |
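/*
 * The functions below are the x86 back ends of KVM's MMU-notifier callbacks:
 * the common code in virt/kvm/kvm_main.c turns host virtual address ranges
 * into calls here whenever the host mm unmaps, remaps or ages the
 * corresponding pages.
 */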
Will Deacon | fdfe7cb | 2020-08-11 11:27:24 +0100 | [diff] [blame] | 1922 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, |
| 1923 | unsigned flags) |
Takuya Yoshikawa | b3ae209 | 2012-07-02 17:56:33 +0900 | [diff] [blame] | 1924 | { |
| 1925 | return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); |
| 1926 | } |
| 1927 | |
Lan Tianyu | 748c0e3 | 2018-12-06 21:21:10 +0800 | [diff] [blame] | 1928 | int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1929 | { |
Lan Tianyu | 0cf853c | 2018-12-06 21:21:11 +0800 | [diff] [blame] | 1930 | return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1931 | } |
| 1932 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1933 | static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1934 | struct kvm_memory_slot *slot, gfn_t gfn, int level, |
| 1935 | unsigned long data) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1936 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1937 | u64 *sptep; |
Kees Cook | 3f649ab | 2020-06-03 13:09:38 -0700 | [diff] [blame] | 1938 | struct rmap_iterator iter; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1939 | int young = 0; |
| 1940 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 1941 | for_each_rmap_spte(rmap_head, &iter, sptep) |
| 1942 | young |= mmu_spte_age(sptep); |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 1943 | |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1944 | trace_kvm_age_page(gfn, level, slot, young); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1945 | return young; |
| 1946 | } |
| 1947 | |
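/*
 * Unlike kvm_age_rmapp() above, which harvests and clears the accessed
 * state via mmu_spte_age(), this variant only reports whether any spte for
 * the gfn is currently marked accessed, without modifying it; it backs
 * kvm_test_age_hva() below.
 */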
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1948 | static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, |
Andres Lagar-Cavilla | 8a9522d | 2014-09-23 12:34:54 -0700 | [diff] [blame] | 1949 | struct kvm_memory_slot *slot, gfn_t gfn, |
| 1950 | int level, unsigned long data) |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1951 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 1952 | u64 *sptep; |
| 1953 | struct rmap_iterator iter; |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1954 | |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 1955 | for_each_rmap_spte(rmap_head, &iter, sptep) |
| 1956 | if (is_accessed_spte(*sptep)) |
| 1957 | return 1; |
Junaid Shahid | 83ef6c8 | 2016-12-06 16:46:13 -0800 | [diff] [blame] | 1958 | return 0; |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1959 | } |
| 1960 | |
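/*
 * rmap_recycle() is invoked from the spte-installation path when a single
 * gfn has accumulated more than RMAP_RECYCLE_THRESHOLD rmap entries: it zaps
 * the entire chain and flushes remote TLBs so that rmap lists cannot grow
 * without bound.
 */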
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 1961 | #define RMAP_RECYCLE_THRESHOLD 1000 |
| 1962 | |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 1963 | static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 1964 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1965 | struct kvm_rmap_head *rmap_head; |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 1966 | struct kvm_mmu_page *sp; |
| 1967 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 1968 | sp = sptep_to_sp(spte); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 1969 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1970 | rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 1971 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 1972 | kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0); |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 1973 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, |
| 1974 | KVM_PAGES_PER_HPAGE(sp->role.level)); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 1975 | } |
| 1976 | |
Andres Lagar-Cavilla | 5712846 | 2014-09-22 14:54:42 -0700 | [diff] [blame] | 1977 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1978 | { |
Andres Lagar-Cavilla | 5712846 | 2014-09-22 14:54:42 -0700 | [diff] [blame] | 1979 | return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1980 | } |
| 1981 | |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1982 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) |
| 1983 | { |
| 1984 | return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); |
| 1985 | } |
| 1986 | |
Yaozu Dong | d6c69ee | 2007-04-25 14:17:25 +0800 | [diff] [blame] | 1987 | #ifdef MMU_DEBUG |
Avi Kivity | 47ad8e6 | 2007-05-06 15:50:58 +0300 | [diff] [blame] | 1988 | static int is_empty_shadow_page(u64 *spt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1989 | { |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 1990 | u64 *pos; |
| 1991 | u64 *end; |
| 1992 | |
Avi Kivity | 47ad8e6 | 2007-05-06 15:50:58 +0300 | [diff] [blame] | 1993 | for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) |
Avi Kivity | 3c91551 | 2008-05-20 16:21:13 +0300 | [diff] [blame] | 1994 | if (is_shadow_present_pte(*pos)) { |
Harvey Harrison | b8688d5 | 2008-03-03 12:59:56 -0800 | [diff] [blame] | 1995 | printk(KERN_ERR "%s: %p %llx\n", __func__, |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 1996 | pos, *pos); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1997 | return 0; |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 1998 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1999 | return 1; |
| 2000 | } |
Yaozu Dong | d6c69ee | 2007-04-25 14:17:25 +0800 | [diff] [blame] | 2001 | #endif |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2002 | |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 2003 | /* |
 | 2004 |  * This value is the sum of all of the kvm instances'
 | 2005 |  * kvm->arch.n_used_mmu_pages values.  We need a global,
 | 2006 |  * aggregate version in order to make the slab shrinker
 | 2007 |  * faster.
| 2008 | */ |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 2009 | static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr) |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 2010 | { |
| 2011 | kvm->arch.n_used_mmu_pages += nr; |
| 2012 | percpu_counter_add(&kvm_total_used_mmu_pages, nr); |
| 2013 | } |
| 2014 | |
Gleb Natapov | 834be0d | 2013-01-30 16:45:05 +0200 | [diff] [blame] | 2015 | static void kvm_mmu_free_page(struct kvm_mmu_page *sp) |
Avi Kivity | 260746c | 2007-01-05 16:36:49 -0800 | [diff] [blame] | 2016 | { |
Paolo Bonzini | fa4a2c0 | 2013-10-02 16:56:16 +0200 | [diff] [blame] | 2017 | MMU_WARN_ON(!is_empty_shadow_page(sp->spt)); |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2018 | hlist_del(&sp->hash_link); |
Xiao Guangrong | bd4c86e | 2011-07-12 03:27:14 +0800 | [diff] [blame] | 2019 | list_del(&sp->link); |
| 2020 | free_page((unsigned long)sp->spt); |
Gleb Natapov | 834be0d | 2013-01-30 16:45:05 +0200 | [diff] [blame] | 2021 | if (!sp->role.direct) |
| 2022 | free_page((unsigned long)sp->gfns); |
Xiao Guangrong | e8ad9a7 | 2010-05-13 10:06:02 +0800 | [diff] [blame] | 2023 | kmem_cache_free(mmu_page_header_cache, sp); |
Avi Kivity | 260746c | 2007-01-05 16:36:49 -0800 | [diff] [blame] | 2024 | } |
| 2025 | |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2026 | static unsigned kvm_page_table_hashfn(gfn_t gfn) |
| 2027 | { |
David Matlack | 114df30 | 2016-12-19 13:58:25 -0800 | [diff] [blame] | 2028 | return hash_64(gfn, KVM_MMU_HASH_SHIFT); |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2029 | } |
| 2030 | |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 2031 | static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, |
| 2032 | struct kvm_mmu_page *sp, u64 *parent_pte) |
| 2033 | { |
| 2034 | if (!parent_pte) |
| 2035 | return; |
| 2036 | |
| 2037 | pte_list_add(vcpu, parent_pte, &sp->parent_ptes); |
| 2038 | } |
| 2039 | |
| 2040 | static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, |
| 2041 | u64 *parent_pte) |
| 2042 | { |
Wei Yang | 8daf346 | 2018-10-04 10:04:22 +0800 | [diff] [blame] | 2043 | __pte_list_remove(parent_pte, &sp->parent_ptes); |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 2044 | } |
| 2045 | |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2046 | static void drop_parent_pte(struct kvm_mmu_page *sp, |
| 2047 | u64 *parent_pte) |
| 2048 | { |
| 2049 | mmu_page_remove_parent_pte(sp, parent_pte); |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 2050 | mmu_spte_clear_no_track(parent_pte); |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2051 | } |
| 2052 | |
Takuya Yoshikawa | 4700579 | 2015-11-20 17:46:29 +0900 | [diff] [blame] | 2053 | static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2054 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2055 | struct kvm_mmu_page *sp; |
Takuya Yoshikawa | 7ddca7e | 2013-03-21 19:33:43 +0900 | [diff] [blame] | 2056 | |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 2057 | sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); |
| 2058 | sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 2059 | if (!direct) |
Sean Christopherson | 94ce87e | 2020-07-02 19:35:37 -0700 | [diff] [blame] | 2060 | sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2061 | set_page_private(virt_to_page(sp->spt), (unsigned long)sp); |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 2062 | |
| 2063 | /* |
| 2064 | * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages() |
| 2065 | * depends on valid pages being added to the head of the list. See |
| 2066 | * comments in kvm_zap_obsolete_pages(). |
| 2067 | */ |
Sean Christopherson | ca333ad | 2019-09-12 19:46:11 -0700 | [diff] [blame] | 2068 | sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; |
Zhang Xiantao | f05e70a | 2007-12-14 10:01:48 +0800 | [diff] [blame] | 2069 | list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 2070 | kvm_mod_used_mmu_pages(vcpu->kvm, +1); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2071 | return sp; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2072 | } |
| 2073 | |
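/*
 * When a shadow page becomes unsync, its ancestors must learn about it:
 * mark_unsync() sets the child's bit in the parent's unsync_child_bitmap
 * and, the first time a parent gains an unsync child, propagates the mark
 * up the parent_ptes chain via kvm_mmu_mark_parents_unsync().  This lets
 * mmu_unsync_walk() later reach every unsync leaf from any root.
 */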
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 2074 | static void mark_unsync(u64 *spte); |
Xiao Guangrong | 6b18493 | 2010-04-16 21:29:17 +0800 | [diff] [blame] | 2075 | static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) |
Marcelo Tosatti | 0074ff6 | 2008-09-23 13:18:40 -0300 | [diff] [blame] | 2076 | { |
Takuya Yoshikawa | 74c4e63 | 2015-11-26 21:15:38 +0900 | [diff] [blame] | 2077 | u64 *sptep; |
| 2078 | struct rmap_iterator iter; |
| 2079 | |
| 2080 | for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) { |
| 2081 | mark_unsync(sptep); |
| 2082 | } |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 2083 | } |
| 2084 | |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 2085 | static void mark_unsync(u64 *spte) |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 2086 | { |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 2087 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 2088 | unsigned int index; |
| 2089 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 2090 | sp = sptep_to_sp(spte); |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 2091 | index = spte - sp->spt; |
| 2092 | if (__test_and_set_bit(index, sp->unsync_child_bitmap)) |
| 2093 | return; |
| 2094 | if (sp->unsync_children++) |
| 2095 | return; |
| 2096 | kvm_mmu_mark_parents_unsync(sp); |
Marcelo Tosatti | 0074ff6 | 2008-09-23 13:18:40 -0300 | [diff] [blame] | 2097 | } |
| 2098 | |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 2099 | static int nonpaging_sync_page(struct kvm_vcpu *vcpu, |
Xiao Guangrong | a4a8e6f | 2010-11-19 17:04:03 +0800 | [diff] [blame] | 2100 | struct kvm_mmu_page *sp) |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 2101 | { |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2102 | return 0; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 2103 | } |
| 2104 | |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 2105 | static void nonpaging_update_pte(struct kvm_vcpu *vcpu, |
| 2106 | struct kvm_mmu_page *sp, u64 *spte, |
Xiao Guangrong | 7c56252 | 2011-03-28 10:29:27 +0800 | [diff] [blame] | 2107 | const void *pte) |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 2108 | { |
| 2109 | WARN_ON(1); |
| 2110 | } |
| 2111 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2112 | #define KVM_PAGE_ARRAY_NR 16 |
| 2113 | |
| 2114 | struct kvm_mmu_pages { |
| 2115 | struct mmu_page_and_offset { |
| 2116 | struct kvm_mmu_page *sp; |
| 2117 | unsigned int idx; |
| 2118 | } page[KVM_PAGE_ARRAY_NR]; |
| 2119 | unsigned int nr; |
| 2120 | }; |
| 2121 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 2122 | static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, |
| 2123 | int idx) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2124 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2125 | int i; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2126 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2127 | if (sp->unsync) |
 | 2128 | 		for (i = 0; i < pvec->nr; i++)
| 2129 | if (pvec->page[i].sp == sp) |
| 2130 | return 0; |
| 2131 | |
| 2132 | pvec->page[pvec->nr].sp = sp; |
| 2133 | pvec->page[pvec->nr].idx = idx; |
| 2134 | pvec->nr++; |
| 2135 | return (pvec->nr == KVM_PAGE_ARRAY_NR); |
| 2136 | } |
| 2137 | |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2138 | static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx) |
| 2139 | { |
| 2140 | --sp->unsync_children; |
| 2141 | WARN_ON((int)sp->unsync_children < 0); |
| 2142 | __clear_bit(idx, sp->unsync_child_bitmap); |
| 2143 | } |
| 2144 | |
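/*
 * Walk sp's unsync_child_bitmap and collect into @pvec the unsync
 * descendants of @sp, together with the intermediate pages leading to them.
 * Stale bits (children that are no longer present or no longer unsync) are
 * cleared along the way.  Returns the number of unsync leaf pages found,
 * or -ENOSPC if @pvec fills up.
 */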
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2145 | static int __mmu_unsync_walk(struct kvm_mmu_page *sp, |
| 2146 | struct kvm_mmu_pages *pvec) |
| 2147 | { |
| 2148 | int i, ret, nr_unsync_leaf = 0; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2149 | |
Takuya Yoshikawa | 37178b8 | 2011-11-29 14:02:45 +0900 | [diff] [blame] | 2150 | for_each_set_bit(i, sp->unsync_child_bitmap, 512) { |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 2151 | struct kvm_mmu_page *child; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2152 | u64 ent = sp->spt[i]; |
| 2153 | |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2154 | if (!is_shadow_present_pte(ent) || is_large_pte(ent)) { |
| 2155 | clear_unsync_child_bit(sp, i); |
| 2156 | continue; |
| 2157 | } |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2158 | |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 2159 | child = to_shadow_page(ent & PT64_BASE_ADDR_MASK); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2160 | |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 2161 | if (child->unsync_children) { |
| 2162 | if (mmu_pages_add(pvec, child, i)) |
| 2163 | return -ENOSPC; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2164 | |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 2165 | ret = __mmu_unsync_walk(child, pvec); |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2166 | if (!ret) { |
| 2167 | clear_unsync_child_bit(sp, i); |
| 2168 | continue; |
| 2169 | } else if (ret > 0) { |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 2170 | nr_unsync_leaf += ret; |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2171 | } else |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 2172 | return ret; |
| 2173 | } else if (child->unsync) { |
| 2174 | nr_unsync_leaf++; |
| 2175 | if (mmu_pages_add(pvec, child, i)) |
| 2176 | return -ENOSPC; |
| 2177 | } else |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2178 | clear_unsync_child_bit(sp, i); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2179 | } |
| 2180 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2181 | return nr_unsync_leaf; |
| 2182 | } |
| 2183 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 2184 | #define INVALID_INDEX (-1) |
| 2185 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2186 | static int mmu_unsync_walk(struct kvm_mmu_page *sp, |
| 2187 | struct kvm_mmu_pages *pvec) |
| 2188 | { |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2189 | pvec->nr = 0; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2190 | if (!sp->unsync_children) |
| 2191 | return 0; |
| 2192 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 2193 | mmu_pages_add(pvec, sp, INVALID_INDEX); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2194 | return __mmu_unsync_walk(sp, pvec); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2195 | } |
| 2196 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2197 | static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 2198 | { |
| 2199 | WARN_ON(!sp->unsync); |
Xiao Guangrong | 5e1b3dd | 2010-04-28 11:55:06 +0800 | [diff] [blame] | 2200 | trace_kvm_mmu_sync_page(sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2201 | sp->unsync = 0; |
| 2202 | --kvm->stat.mmu_unsync; |
| 2203 | } |
| 2204 | |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2205 | static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 2206 | struct list_head *invalid_list); |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2207 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, |
| 2208 | struct list_head *invalid_list); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2209 | |
Sean Christopherson | ac101b7 | 2020-06-23 12:40:26 -0700 | [diff] [blame] | 2210 | #define for_each_valid_sp(_kvm, _sp, _list) \ |
| 2211 | hlist_for_each_entry(_sp, _list, hash_link) \ |
Sean Christopherson | fac026d | 2019-09-12 19:46:03 -0700 | [diff] [blame] | 2212 | if (is_obsolete_sp((_kvm), (_sp))) { \ |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2213 | } else |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2214 | |
Takuya Yoshikawa | 1044b03 | 2013-03-06 16:05:07 +0900 | [diff] [blame] | 2215 | #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \ |
Sean Christopherson | ac101b7 | 2020-06-23 12:40:26 -0700 | [diff] [blame] | 2216 | for_each_valid_sp(_kvm, _sp, \ |
| 2217 | &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \ |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2218 | if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2219 | |
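/*
 * Shadow-EPT roles deliberately set both cr0_wp and smap_andnot_wp, a
 * combination that cannot occur for ordinary shadow pages (there,
 * smap_andnot_wp implies !cr0_wp), so the pair unambiguously identifies an
 * EPT shadow page.
 */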
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 2220 | static inline bool is_ept_sp(struct kvm_mmu_page *sp) |
| 2221 | { |
| 2222 | return sp->role.cr0_wp && sp->role.smap_andnot_wp; |
| 2223 | } |
| 2224 | |
Xiao Guangrong | f918b44 | 2010-06-11 21:30:36 +0800 | [diff] [blame] | 2225 | /* @sp->gfn should be write-protected at the call site */ |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2226 | static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
| 2227 | struct list_head *invalid_list) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2228 | { |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 2229 | if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) || |
| 2230 | vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2231 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2232 | return false; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2233 | } |
| 2234 | |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2235 | return true; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2236 | } |
| 2237 | |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 2238 | static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, |
| 2239 | struct list_head *invalid_list, |
| 2240 | bool remote_flush) |
| 2241 | { |
Sean Christopherson | cfd32ac | 2019-04-12 19:55:41 -0700 | [diff] [blame] | 2242 | if (!remote_flush && list_empty(invalid_list)) |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 2243 | return false; |
| 2244 | |
| 2245 | if (!list_empty(invalid_list)) |
| 2246 | kvm_mmu_commit_zap_page(kvm, invalid_list); |
| 2247 | else |
| 2248 | kvm_flush_remote_tlbs(kvm); |
| 2249 | return true; |
| 2250 | } |
| 2251 | |
Paolo Bonzini | 35a7051 | 2016-02-24 10:03:27 +0100 | [diff] [blame] | 2252 | static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu, |
| 2253 | struct list_head *invalid_list, |
| 2254 | bool remote_flush, bool local_flush) |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 2255 | { |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 2256 | if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush)) |
Paolo Bonzini | 35a7051 | 2016-02-24 10:03:27 +0100 | [diff] [blame] | 2257 | return; |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 2258 | |
Sean Christopherson | a211363 | 2019-02-05 13:01:20 -0800 | [diff] [blame] | 2259 | if (local_flush) |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 2260 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 2261 | } |
| 2262 | |
Xiao Guangrong | e37fa78 | 2011-11-30 17:43:24 +0800 | [diff] [blame] | 2263 | #ifdef CONFIG_KVM_MMU_AUDIT |
| 2264 | #include "mmu_audit.c" |
| 2265 | #else |
| 2266 | static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } |
| 2267 | static void mmu_audit_disable(void) { } |
| 2268 | #endif |
| 2269 | |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 2270 | static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 2271 | { |
Sean Christopherson | fac026d | 2019-09-12 19:46:03 -0700 | [diff] [blame] | 2272 | return sp->role.invalid || |
| 2273 | unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 2274 | } |
| 2275 | |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2276 | static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2277 | struct list_head *invalid_list) |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 2278 | { |
Paolo Bonzini | 9a43c5d | 2016-02-24 10:28:01 +0100 | [diff] [blame] | 2279 | kvm_unlink_unsync_page(vcpu->kvm, sp); |
| 2280 | return __kvm_sync_page(vcpu, sp, invalid_list); |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 2281 | } |
| 2282 | |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2283 | /* @gfn should be write-protected at the call site */ |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2284 | static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2285 | struct list_head *invalid_list) |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2286 | { |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2287 | struct kvm_mmu_page *s; |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2288 | bool ret = false; |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2289 | |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 2290 | for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2291 | if (!s->unsync) |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2292 | continue; |
| 2293 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2294 | WARN_ON(s->role.level != PG_LEVEL_4K); |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2295 | ret |= kvm_sync_page(vcpu, s, invalid_list); |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2296 | } |
| 2297 | |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2298 | return ret; |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2299 | } |
| 2300 | |
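/*
 * mmu_page_path remembers, for every level above the current pvec entry,
 * which parent shadow page and child index lead down to it.  for_each_sp()
 * walks the pages collected by mmu_unsync_walk() while keeping this path
 * current, so that mmu_pages_clear_parents() can clear the corresponding
 * unsync_child_bitmap bits back up the path once a page has been synced,
 * stopping as soon as a parent still has other unsync children.
 */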
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2301 | struct mmu_page_path { |
Yu Zhang | 2a7266a | 2017-08-24 20:27:54 +0800 | [diff] [blame] | 2302 | struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL]; |
| 2303 | unsigned int idx[PT64_ROOT_MAX_LEVEL]; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2304 | }; |
| 2305 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2306 | #define for_each_sp(pvec, sp, parents, i) \ |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2307 | for (i = mmu_pages_first(&pvec, &parents); \ |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2308 | i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ |
| 2309 | i = mmu_pages_next(&pvec, &parents, i)) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2310 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 2311 | static int mmu_pages_next(struct kvm_mmu_pages *pvec, |
| 2312 | struct mmu_page_path *parents, |
| 2313 | int i) |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2314 | { |
| 2315 | int n; |
| 2316 | |
| 2317 | for (n = i+1; n < pvec->nr; n++) { |
| 2318 | struct kvm_mmu_page *sp = pvec->page[n].sp; |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2319 | unsigned idx = pvec->page[n].idx; |
| 2320 | int level = sp->role.level; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2321 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2322 | parents->idx[level-1] = idx; |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2323 | if (level == PG_LEVEL_4K) |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2324 | break; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2325 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2326 | parents->parent[level-2] = sp; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2327 | } |
| 2328 | |
| 2329 | return n; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2330 | } |
| 2331 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2332 | static int mmu_pages_first(struct kvm_mmu_pages *pvec, |
| 2333 | struct mmu_page_path *parents) |
| 2334 | { |
| 2335 | struct kvm_mmu_page *sp; |
| 2336 | int level; |
| 2337 | |
| 2338 | if (pvec->nr == 0) |
| 2339 | return 0; |
| 2340 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 2341 | WARN_ON(pvec->page[0].idx != INVALID_INDEX); |
| 2342 | |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2343 | sp = pvec->page[0].sp; |
| 2344 | level = sp->role.level; |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2345 | WARN_ON(level == PG_LEVEL_4K); |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2346 | |
| 2347 | parents->parent[level-2] = sp; |
| 2348 | |
| 2349 | /* Also set up a sentinel. Further entries in pvec are all |
| 2350 | * children of sp, so this element is never overwritten. |
| 2351 | */ |
| 2352 | parents->parent[level-1] = NULL; |
| 2353 | return mmu_pages_next(pvec, parents, 0); |
| 2354 | } |
| 2355 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 2356 | static void mmu_pages_clear_parents(struct mmu_page_path *parents) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2357 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2358 | struct kvm_mmu_page *sp; |
| 2359 | unsigned int level = 0; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2360 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2361 | do { |
| 2362 | unsigned int idx = parents->idx[level]; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2363 | sp = parents->parent[level]; |
| 2364 | if (!sp) |
| 2365 | return; |
| 2366 | |
Xiao Guangrong | e23d3fe | 2016-02-24 09:46:06 +0100 | [diff] [blame] | 2367 | WARN_ON(idx == INVALID_INDEX); |
Takuya Yoshikawa | fd95145 | 2015-11-20 17:43:13 +0900 | [diff] [blame] | 2368 | clear_unsync_child_bit(sp, idx); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2369 | level++; |
Paolo Bonzini | 0a47cd8 | 2016-02-23 13:54:25 +0100 | [diff] [blame] | 2370 | } while (!sp->unsync_children); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2371 | } |
| 2372 | |
| 2373 | static void mmu_sync_children(struct kvm_vcpu *vcpu, |
| 2374 | struct kvm_mmu_page *parent) |
| 2375 | { |
| 2376 | int i; |
| 2377 | struct kvm_mmu_page *sp; |
| 2378 | struct mmu_page_path parents; |
| 2379 | struct kvm_mmu_pages pages; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2380 | LIST_HEAD(invalid_list); |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 2381 | bool flush = false; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2382 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2383 | while (mmu_unsync_walk(parent, &pages)) { |
Xiao Guangrong | 2f84569 | 2012-06-20 15:56:53 +0800 | [diff] [blame] | 2384 | bool protected = false; |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 2385 | |
| 2386 | for_each_sp(pages, sp, parents, i) |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 2387 | protected |= rmap_write_protect(vcpu, sp->gfn); |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 2388 | |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 2389 | if (protected) { |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 2390 | kvm_flush_remote_tlbs(vcpu->kvm); |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 2391 | flush = false; |
| 2392 | } |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 2393 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2394 | for_each_sp(pages, sp, parents, i) { |
Paolo Bonzini | 1f50f1b | 2016-02-24 11:07:14 +0100 | [diff] [blame] | 2395 | flush |= kvm_sync_page(vcpu, sp, &invalid_list); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2396 | mmu_pages_clear_parents(&parents); |
| 2397 | } |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 2398 | if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) { |
| 2399 | kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); |
| 2400 | cond_resched_lock(&vcpu->kvm->mmu_lock); |
| 2401 | flush = false; |
| 2402 | } |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2403 | } |
Paolo Bonzini | 50c9e6f | 2016-02-25 10:47:38 +0100 | [diff] [blame] | 2404 | |
| 2405 | kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2406 | } |
| 2407 | |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 2408 | static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) |
| 2409 | { |
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 2410 | atomic_set(&sp->write_flooding_count, 0); |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 2411 | } |
| 2412 | |
| 2413 | static void clear_sp_write_flooding_count(u64 *spte) |
| 2414 | { |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 2415 | __clear_sp_write_flooding_count(sptep_to_sp(spte)); |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 2416 | } |
| 2417 | |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2418 | static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, |
| 2419 | gfn_t gfn, |
| 2420 | gva_t gaddr, |
| 2421 | unsigned level, |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 2422 | int direct, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 2423 | unsigned int access) |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2424 | { |
Sean Christopherson | fb58a9c | 2020-06-23 12:40:27 -0700 | [diff] [blame] | 2425 | bool direct_mmu = vcpu->arch.mmu->direct_map; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2426 | union kvm_mmu_page_role role; |
Sean Christopherson | ac101b7 | 2020-06-23 12:40:26 -0700 | [diff] [blame] | 2427 | struct hlist_head *sp_list; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2428 | unsigned quadrant; |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2429 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 2430 | bool need_sync = false; |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2431 | bool flush = false; |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2432 | int collisions = 0; |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2433 | LIST_HEAD(invalid_list); |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2434 | |
Vitaly Kuznetsov | 36d9594d | 2018-10-08 21:28:10 +0200 | [diff] [blame] | 2435 | role = vcpu->arch.mmu->mmu_role.base; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2436 | role.level = level; |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 2437 | role.direct = direct; |
Avi Kivity | 84b0c8c | 2010-03-14 10:16:40 +0200 | [diff] [blame] | 2438 | if (role.direct) |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 2439 | role.gpte_is_8_bytes = true; |
Avi Kivity | 41074d0 | 2007-12-09 17:00:02 +0200 | [diff] [blame] | 2440 | role.access = access; |
Sean Christopherson | fb58a9c | 2020-06-23 12:40:27 -0700 | [diff] [blame] | 2441 | if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { |
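		/*
		 * A 32-bit guest table has 1024 4-byte entries while a shadow
		 * page holds only 512 8-byte ones, so a single guest table is
		 * backed by two shadow pages at the PTE level and four at the
		 * PDE level; "quadrant" records which slice of the guest table
		 * this shadow page covers.
		 */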
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2442 | quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); |
| 2443 | quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; |
| 2444 | role.quadrant = quadrant; |
| 2445 | } |
Sean Christopherson | ac101b7 | 2020-06-23 12:40:26 -0700 | [diff] [blame] | 2446 | |
| 2447 | sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]; |
| 2448 | for_each_valid_sp(vcpu->kvm, sp, sp_list) { |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2449 | if (sp->gfn != gfn) { |
| 2450 | collisions++; |
| 2451 | continue; |
| 2452 | } |
| 2453 | |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2454 | if (!need_sync && sp->unsync) |
| 2455 | need_sync = true; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2456 | |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2457 | if (sp->role.word != role.word) |
| 2458 | continue; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2459 | |
Sean Christopherson | fb58a9c | 2020-06-23 12:40:27 -0700 | [diff] [blame] | 2460 | if (direct_mmu) |
| 2461 | goto trace_get_page; |
| 2462 | |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2463 | if (sp->unsync) { |
| 2464 | /* The page is good, but __kvm_sync_page might still end |
| 2465 | * up zapping it. If so, break in order to rebuild it. |
| 2466 | */ |
| 2467 | if (!__kvm_sync_page(vcpu, sp, &invalid_list)) |
| 2468 | break; |
| 2469 | |
| 2470 | WARN_ON(!list_empty(&invalid_list)); |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 2471 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2472 | } |
Xiao Guangrong | e02aa90 | 2010-05-15 18:52:34 +0800 | [diff] [blame] | 2473 | |
Takuya Yoshikawa | 98bba23 | 2015-11-26 21:14:34 +0900 | [diff] [blame] | 2474 | if (sp->unsync_children) |
Lai Jiangshan | f6f6195 | 2020-09-02 21:54:21 +0800 | [diff] [blame] | 2475 | kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); |
Xiao Guangrong | e02aa90 | 2010-05-15 18:52:34 +0800 | [diff] [blame] | 2476 | |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 2477 | __clear_sp_write_flooding_count(sp); |
Sean Christopherson | fb58a9c | 2020-06-23 12:40:27 -0700 | [diff] [blame] | 2478 | |
| 2479 | trace_get_page: |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2480 | trace_kvm_mmu_get_page(sp, false); |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2481 | goto out; |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2482 | } |
Takuya Yoshikawa | 4700579 | 2015-11-20 17:46:29 +0900 | [diff] [blame] | 2483 | |
Avi Kivity | dfc5aa0 | 2007-12-18 19:47:18 +0200 | [diff] [blame] | 2484 | ++vcpu->kvm->stat.mmu_cache_miss; |
Takuya Yoshikawa | 4700579 | 2015-11-20 17:46:29 +0900 | [diff] [blame] | 2485 | |
| 2486 | sp = kvm_mmu_alloc_page(vcpu, direct); |
| 2487 | |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2488 | sp->gfn = gfn; |
| 2489 | sp->role = role; |
Sean Christopherson | ac101b7 | 2020-06-23 12:40:26 -0700 | [diff] [blame] | 2490 | hlist_add_head(&sp->hash_link, sp_list); |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 2491 | if (!direct) { |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 2492 | /* |
 | 2493 | 		 * We should do write protection before syncing pages;
 | 2494 | 		 * otherwise the content of the synced shadow page may
 | 2495 | 		 * be inconsistent with the guest page table.
| 2496 | */ |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 2497 | account_shadowed(vcpu->kvm, sp); |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2498 | if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn)) |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 2499 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1); |
Xiao Guangrong | 56ca57f | 2016-02-24 17:51:14 +0800 | [diff] [blame] | 2500 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2501 | if (level > PG_LEVEL_4K && need_sync) |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2502 | flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2503 | } |
Avi Kivity | f691fe1 | 2009-07-06 15:58:14 +0300 | [diff] [blame] | 2504 | trace_kvm_mmu_get_page(sp, true); |
Paolo Bonzini | 2a74003a | 2016-02-24 11:26:10 +0100 | [diff] [blame] | 2505 | |
| 2506 | kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); |
David Matlack | f3414bc | 2016-12-20 15:25:57 -0800 | [diff] [blame] | 2507 | out: |
| 2508 | if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions) |
| 2509 | vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions; |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2510 | return sp; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2511 | } |
| 2512 | |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2513 | static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator, |
| 2514 | struct kvm_vcpu *vcpu, hpa_t root, |
| 2515 | u64 addr) |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2516 | { |
| 2517 | iterator->addr = addr; |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2518 | iterator->shadow_addr = root; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2519 | iterator->level = vcpu->arch.mmu->shadow_root_level; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2520 | |
Yu Zhang | 2a7266a | 2017-08-24 20:27:54 +0800 | [diff] [blame] | 2521 | if (iterator->level == PT64_ROOT_4LEVEL && |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2522 | vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && |
| 2523 | !vcpu->arch.mmu->direct_map) |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2524 | --iterator->level; |
| 2525 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2526 | if (iterator->level == PT32E_ROOT_LEVEL) { |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2527 | /* |
 | 2528 | 		 * prev_root is currently only used for 64-bit hosts, so only
| 2529 | * the active root_hpa is valid here. |
| 2530 | */ |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2531 | BUG_ON(root != vcpu->arch.mmu->root_hpa); |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2532 | |
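		/*
		 * With a PAE root the top "table" is the special 4-entry
		 * pae_root; bits 31:30 of the address pick one of its PDPTEs
		 * and the walk continues below it.  A zero entry means nothing
		 * is mapped there, so the walk terminates immediately.
		 */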
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2533 | iterator->shadow_addr |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2534 | = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2535 | iterator->shadow_addr &= PT64_BASE_ADDR_MASK; |
| 2536 | --iterator->level; |
| 2537 | if (!iterator->shadow_addr) |
| 2538 | iterator->level = 0; |
| 2539 | } |
| 2540 | } |
| 2541 | |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2542 | static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, |
| 2543 | struct kvm_vcpu *vcpu, u64 addr) |
| 2544 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 2545 | shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa, |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 2546 | addr); |
| 2547 | } |
| 2548 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2549 | static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) |
| 2550 | { |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2551 | if (iterator->level < PG_LEVEL_4K) |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2552 | return false; |
Marcelo Tosatti | 4d88954 | 2009-06-11 12:07:41 -0300 | [diff] [blame] | 2553 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2554 | iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); |
| 2555 | iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; |
| 2556 | return true; |
| 2557 | } |
| 2558 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2559 | static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, |
| 2560 | u64 spte) |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2561 | { |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2562 | if (is_last_spte(spte, iterator->level)) { |
Xiao Guangrong | 052331b | 2011-07-12 03:21:17 +0800 | [diff] [blame] | 2563 | iterator->level = 0; |
| 2564 | return; |
| 2565 | } |
| 2566 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2567 | iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 2568 | --iterator->level; |
| 2569 | } |
| 2570 | |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2571 | static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) |
| 2572 | { |
David Hildenbrand | bb606a9 | 2017-08-24 20:51:23 +0200 | [diff] [blame] | 2573 | __shadow_walk_next(iterator, *iterator->sptep); |
Xiao Guangrong | c2a2ac2 | 2011-07-12 03:32:13 +0800 | [diff] [blame] | 2574 | } |
| 2575 | |
Takuya Yoshikawa | 98bba23 | 2015-11-26 21:14:34 +0900 | [diff] [blame] | 2576 | static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, |
| 2577 | struct kvm_mmu_page *sp) |
Avi Kivity | 32ef26a | 2010-07-13 14:27:04 +0300 | [diff] [blame] | 2578 | { |
| 2579 | u64 spte; |
| 2580 | |
Bandan Das | ffb128c | 2016-07-12 18:18:49 -0400 | [diff] [blame] | 2581 | BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK); |
Yang Zhang | 7a1638c | 2013-08-05 11:07:13 +0300 | [diff] [blame] | 2582 | |
Bandan Das | ffb128c | 2016-07-12 18:18:49 -0400 | [diff] [blame] | 2583 | spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK | |
Tom Lendacky | d0ec49d | 2017-07-17 16:10:27 -0500 | [diff] [blame] | 2584 | shadow_user_mask | shadow_x_mask | shadow_me_mask; |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 2585 | |
| 2586 | if (sp_ad_disabled(sp)) |
Paolo Bonzini | 6eeb4ef | 2019-09-24 12:43:08 +0200 | [diff] [blame] | 2587 | spte |= SPTE_AD_DISABLED_MASK; |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 2588 | else |
| 2589 | spte |= shadow_accessed_mask; |
Xiao Guangrong | 24db273 | 2013-02-05 15:28:02 +0800 | [diff] [blame] | 2590 | |
Xiao Guangrong | 1df9f2d | 2011-07-12 03:30:35 +0800 | [diff] [blame] | 2591 | mmu_spte_set(sptep, spte); |
Takuya Yoshikawa | 98bba23 | 2015-11-26 21:14:34 +0900 | [diff] [blame] | 2592 | |
| 2593 | mmu_page_add_parent_pte(vcpu, sp, sptep); |
| 2594 | |
| 2595 | if (sp->unsync_children || sp->unsync) |
| 2596 | mark_unsync(sptep); |
Avi Kivity | 32ef26a | 2010-07-13 14:27:04 +0300 | [diff] [blame] | 2597 | } |
| 2598 | |
Avi Kivity | a357bd2 | 2010-07-13 14:27:07 +0300 | [diff] [blame] | 2599 | static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
| 2600 | unsigned direct_access) |
| 2601 | { |
| 2602 | if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { |
| 2603 | struct kvm_mmu_page *child; |
| 2604 | |
| 2605 | /* |
| 2606 | * For the direct sp, if the guest pte's dirty bit |
| 2607 | * changed from clean to dirty, it will corrupt the |
| 2608 | * sp's access: it would allow writes through a read-only sp, |
| 2609 | * so we should update the spte at this point to get |
| 2610 | * a new sp with the correct access. |
| 2611 | */ |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 2612 | child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK); |
Avi Kivity | a357bd2 | 2010-07-13 14:27:07 +0300 | [diff] [blame] | 2613 | if (child->role.access == direct_access) |
| 2614 | return; |
| 2615 | |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2616 | drop_parent_pte(child, sptep); |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 2617 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1); |
Avi Kivity | a357bd2 | 2010-07-13 14:27:07 +0300 | [diff] [blame] | 2618 | } |
| 2619 | } |
| 2620 | |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2621 | /* Returns the number of zapped non-leaf child shadow pages. */ |
| 2622 | static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 2623 | u64 *spte, struct list_head *invalid_list) |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2624 | { |
| 2625 | u64 pte; |
| 2626 | struct kvm_mmu_page *child; |
| 2627 | |
| 2628 | pte = *spte; |
| 2629 | if (is_shadow_present_pte(pte)) { |
Xiao Guangrong | 505aef8 | 2011-09-22 16:56:06 +0800 | [diff] [blame] | 2630 | if (is_last_spte(pte, sp->role.level)) { |
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 2631 | drop_spte(kvm, spte); |
Xiao Guangrong | 505aef8 | 2011-09-22 16:56:06 +0800 | [diff] [blame] | 2632 | if (is_large_pte(pte)) |
| 2633 | --kvm->stat.lpages; |
| 2634 | } else { |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 2635 | child = to_shadow_page(pte & PT64_BASE_ADDR_MASK); |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2636 | drop_parent_pte(child, spte); |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2637 | |
| 2638 | /* |
| 2639 | * Recursively zap nested TDP SPs; parentless SPs are |
| 2640 | * unlikely to be used again in the near future. This |
| 2641 | * avoids retaining a large number of stale nested SPs. |
| 2642 | */ |
| 2643 | if (tdp_enabled && invalid_list && |
| 2644 | child->role.guest_mode && !child->parent_ptes.val) |
| 2645 | return kvm_mmu_prepare_zap_page(kvm, child, |
| 2646 | invalid_list); |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2647 | } |
Sean Christopherson | ace569e | 2020-09-23 15:14:05 -0700 | [diff] [blame] | 2648 | } else if (is_mmio_spte(pte)) { |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 2649 | mmu_spte_clear_no_track(spte); |
Sean Christopherson | ace569e | 2020-09-23 15:14:05 -0700 | [diff] [blame] | 2650 | } |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2651 | return 0; |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2652 | } |
| 2653 | |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2654 | static int kvm_mmu_page_unlink_children(struct kvm *kvm, |
| 2655 | struct kvm_mmu_page *sp, |
| 2656 | struct list_head *invalid_list) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2657 | { |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2658 | int zapped = 0; |
Avi Kivity | 697fe2e | 2007-01-05 16:36:46 -0800 | [diff] [blame] | 2659 | unsigned i; |
Avi Kivity | 697fe2e | 2007-01-05 16:36:46 -0800 | [diff] [blame] | 2660 | |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 2661 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2662 | zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list); |
| 2663 | |
| 2664 | return zapped; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2665 | } |
| 2666 | |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2667 | static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2668 | { |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 2669 | u64 *sptep; |
| 2670 | struct rmap_iterator iter; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2671 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 2672 | while ((sptep = rmap_get_first(&sp->parent_ptes, &iter))) |
Takuya Yoshikawa | 1e3f42f | 2012-03-21 23:50:34 +0900 | [diff] [blame] | 2673 | drop_parent_pte(sp, sptep); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2674 | } |
| 2675 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2676 | static int mmu_zap_unsync_children(struct kvm *kvm, |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2677 | struct kvm_mmu_page *parent, |
| 2678 | struct list_head *invalid_list) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2679 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2680 | int i, zapped = 0; |
| 2681 | struct mmu_page_path parents; |
| 2682 | struct kvm_mmu_pages pages; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2683 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2684 | if (parent->role.level == PG_LEVEL_4K) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2685 | return 0; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2686 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2687 | while (mmu_unsync_walk(parent, &pages)) { |
| 2688 | struct kvm_mmu_page *sp; |
| 2689 | |
| 2690 | for_each_sp(pages, sp, parents, i) { |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2691 | kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2692 | mmu_pages_clear_parents(&parents); |
Xiao Guangrong | 77662e0 | 2010-04-16 16:34:42 +0800 | [diff] [blame] | 2693 | zapped++; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2694 | } |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 2695 | } |
| 2696 | |
| 2697 | return zapped; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2698 | } |
| 2699 | |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2700 | static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, |
| 2701 | struct kvm_mmu_page *sp, |
| 2702 | struct list_head *invalid_list, |
| 2703 | int *nr_zapped) |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2704 | { |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2705 | bool list_unstable; |
Avi Kivity | f691fe1 | 2009-07-06 15:58:14 +0300 | [diff] [blame] | 2706 | |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2707 | trace_kvm_mmu_prepare_zap_page(sp); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2708 | ++kvm->stat.mmu_shadow_zapped; |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2709 | *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 2710 | *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 2711 | kvm_mmu_unlink_parents(kvm, sp); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 2712 | |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2713 | /* Zapping children means active_mmu_pages has become unstable. */ |
| 2714 | list_unstable = *nr_zapped; |
| 2715 | |
Avi Kivity | f6e2c02b | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 2716 | if (!sp->role.invalid && !sp->role.direct) |
Paolo Bonzini | 3ed1a47 | 2015-05-19 16:29:22 +0200 | [diff] [blame] | 2717 | unaccount_shadowed(kvm, sp); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 2718 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2719 | if (sp->unsync) |
| 2720 | kvm_unlink_unsync_page(kvm, sp); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2721 | if (!sp->root_count) { |
Gui Jianfeng | 54a4f02 | 2010-05-05 09:03:49 +0800 | [diff] [blame] | 2722 | /* Count self */ |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2723 | (*nr_zapped)++; |
Sean Christopherson | f95eec9 | 2020-06-23 12:35:39 -0700 | [diff] [blame] | 2724 | |
| 2725 | /* |
| 2726 | * Already invalid pages (previously active roots) are not on |
| 2727 | * the active page list. See list_del() in the "else" case of |
| 2728 | * !sp->root_count. |
| 2729 | */ |
| 2730 | if (sp->role.invalid) |
| 2731 | list_add(&sp->link, invalid_list); |
| 2732 | else |
| 2733 | list_move(&sp->link, invalid_list); |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame] | 2734 | kvm_mod_used_mmu_pages(kvm, -1); |
Marcelo Tosatti | 2e53d63 | 2008-02-20 14:47:24 -0500 | [diff] [blame] | 2735 | } else { |
Sean Christopherson | f95eec9 | 2020-06-23 12:35:39 -0700 | [diff] [blame] | 2736 | /* |
| 2737 | * Remove the active root from the active page list, the root |
| 2738 | * will be explicitly freed when the root_count hits zero. |
| 2739 | */ |
| 2740 | list_del(&sp->link); |
Gleb Natapov | 05988d7 | 2013-05-31 08:36:30 +0800 | [diff] [blame] | 2741 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 2742 | /* |
| 2743 | * Obsolete pages cannot be used on any vCPUs; see the comment |
| 2744 | * in kvm_mmu_zap_all_fast(). Note, is_obsolete_sp() also |
| 2745 | * treats invalid shadow pages as being obsolete. |
| 2746 | */ |
| 2747 | if (!is_obsolete_sp(kvm, sp)) |
Gleb Natapov | 05988d7 | 2013-05-31 08:36:30 +0800 | [diff] [blame] | 2748 | kvm_reload_remote_mmus(kvm); |
Marcelo Tosatti | 2e53d63 | 2008-02-20 14:47:24 -0500 | [diff] [blame] | 2749 | } |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2750 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 2751 | if (sp->lpage_disallowed) |
| 2752 | unaccount_huge_nx_page(kvm, sp); |
| 2753 | |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2754 | sp->role.invalid = 1; |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 2755 | return list_unstable; |
| 2756 | } |
| 2757 | |
| 2758 | static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 2759 | struct list_head *invalid_list) |
| 2760 | { |
| 2761 | int nr_zapped; |
| 2762 | |
| 2763 | __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped); |
| 2764 | return nr_zapped; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2765 | } |
| 2766 | |
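| | /* |
| |  * Zapping shadow pages is a two-phase operation: |
| |  * kvm_mmu_prepare_zap_page() above marks a page invalid and queues it |
| |  * on @invalid_list, and kvm_mmu_commit_zap_page() below frees the |
| |  * queued pages only after flushing remote TLBs, so no vCPU can still |
| |  * be walking them. |
| |  */ |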
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2767 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, |
| 2768 | struct list_head *invalid_list) |
| 2769 | { |
Takuya Yoshikawa | 945315b | 2013-03-06 16:05:52 +0900 | [diff] [blame] | 2770 | struct kvm_mmu_page *sp, *nsp; |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2771 | |
| 2772 | if (list_empty(invalid_list)) |
| 2773 | return; |
| 2774 | |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 2775 | /* |
Lan Tianyu | 9753f52 | 2016-03-13 11:10:24 +0800 | [diff] [blame] | 2776 | * We need to make sure everyone sees our modifications to |
| 2777 | * the page tables and that we see changes to vcpu->mode here. The barrier |
| 2778 | * in the kvm_flush_remote_tlbs() achieves this. This pairs |
| 2779 | * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end. |
| 2780 | * |
| 2781 | * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit |
| 2782 | * guest mode and/or lockless shadow page table walks. |
Avi Kivity | c142786 | 2012-05-14 15:44:06 +0300 | [diff] [blame] | 2783 | */ |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2784 | kvm_flush_remote_tlbs(kvm); |
| 2785 | |
Takuya Yoshikawa | 945315b | 2013-03-06 16:05:52 +0900 | [diff] [blame] | 2786 | list_for_each_entry_safe(sp, nsp, invalid_list, link) { |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2787 | WARN_ON(!sp->role.invalid || sp->root_count); |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame] | 2788 | kvm_mmu_free_page(sp); |
Takuya Yoshikawa | 945315b | 2013-03-06 16:05:52 +0900 | [diff] [blame] | 2789 | } |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 2790 | } |
| 2791 | |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2792 | static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm, |
| 2793 | unsigned long nr_to_zap) |
Takuya Yoshikawa | 5da5960 | 2013-03-06 16:06:58 +0900 | [diff] [blame] | 2794 | { |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2795 | unsigned long total_zapped = 0; |
| 2796 | struct kvm_mmu_page *sp, *tmp; |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2797 | LIST_HEAD(invalid_list); |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2798 | bool unstable; |
| 2799 | int nr_zapped; |
Takuya Yoshikawa | 5da5960 | 2013-03-06 16:06:58 +0900 | [diff] [blame] | 2800 | |
| 2801 | if (list_empty(&kvm->arch.active_mmu_pages)) |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2802 | return 0; |
Takuya Yoshikawa | 5da5960 | 2013-03-06 16:06:58 +0900 | [diff] [blame] | 2803 | |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2804 | restart: |
| 2805 | list_for_each_entry_safe(sp, tmp, &kvm->arch.active_mmu_pages, link) { |
| 2806 | /* |
| 2807 | * Don't zap active root pages; the page itself can't be freed |
| 2808 | * and zapping it will just force vCPUs to realloc and reload. |
| 2809 | */ |
| 2810 | if (sp->root_count) |
| 2811 | continue; |
| 2812 | |
| 2813 | unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, |
| 2814 | &nr_zapped); |
| 2815 | total_zapped += nr_zapped; |
| 2816 | if (total_zapped >= nr_to_zap) |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2817 | break; |
| 2818 | |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2819 | if (unstable) |
| 2820 | goto restart; |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2821 | } |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2822 | |
| 2823 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
| 2824 | |
| 2825 | kvm->stat.mmu_recycled += total_zapped; |
| 2826 | return total_zapped; |
| 2827 | } |
| 2828 | |
Sean Christopherson | afe8d7e | 2020-06-22 13:20:30 -0700 | [diff] [blame] | 2829 | static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm) |
| 2830 | { |
| 2831 | if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages) |
| 2832 | return kvm->arch.n_max_mmu_pages - |
| 2833 | kvm->arch.n_used_mmu_pages; |
| 2834 | |
| 2835 | return 0; |
Takuya Yoshikawa | 5da5960 | 2013-03-06 16:06:58 +0900 | [diff] [blame] | 2836 | } |
| 2837 | |
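| | /* |
| |  * Ensure the vCPU has enough free shadow pages to make progress: if |
| |  * fewer than KVM_MIN_FREE_MMU_PAGES pages are available, zap the |
| |  * oldest pages until roughly KVM_REFILL_PAGES are free again. |
| |  */ |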
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2838 | static int make_mmu_pages_available(struct kvm_vcpu *vcpu) |
| 2839 | { |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2840 | unsigned long avail = kvm_mmu_available_pages(vcpu->kvm); |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2841 | |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2842 | if (likely(avail >= KVM_MIN_FREE_MMU_PAGES)) |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2843 | return 0; |
| 2844 | |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2845 | kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail); |
Sean Christopherson | ba7888d | 2019-12-06 15:57:15 -0800 | [diff] [blame] | 2846 | |
| 2847 | if (!kvm_mmu_available_pages(vcpu->kvm)) |
| 2848 | return -ENOSPC; |
| 2849 | return 0; |
| 2850 | } |
| 2851 | |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2852 | /* |
| 2853 | * Changing the number of mmu pages allocated to the vm |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2854 | * Note: if goal_nr_mmu_pages is too small, you will get a deadlock |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2855 | */ |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 2856 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2857 | { |
Takuya Yoshikawa | b34cb59 | 2013-01-08 19:46:07 +0900 | [diff] [blame] | 2858 | spin_lock(&kvm->mmu_lock); |
| 2859 | |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2860 | if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { |
Sean Christopherson | 6b82ef2 | 2020-06-23 12:35:40 -0700 | [diff] [blame] | 2861 | kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages - |
| 2862 | goal_nr_mmu_pages); |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2863 | |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2864 | goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2865 | } |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2866 | |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 2867 | kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; |
Takuya Yoshikawa | b34cb59 | 2013-01-08 19:46:07 +0900 | [diff] [blame] | 2868 | |
| 2869 | spin_unlock(&kvm->mmu_lock); |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 2870 | } |
| 2871 | |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 2872 | int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2873 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2874 | struct kvm_mmu_page *sp; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2875 | LIST_HEAD(invalid_list); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2876 | int r; |
| 2877 | |
Xiao Guangrong | 9ad17b10 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 2878 | pgprintk("%s: looking for gfn %llx\n", __func__, gfn); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2879 | r = 0; |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 2880 | spin_lock(&kvm->mmu_lock); |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 2881 | for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { |
Xiao Guangrong | 9ad17b10 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 2882 | pgprintk("%s: gfn %llx role %x\n", __func__, gfn, |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2883 | sp->role.word); |
| 2884 | r = 1; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 2885 | kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 2886 | } |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2887 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 2888 | spin_unlock(&kvm->mmu_lock); |
| 2889 | |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 2890 | return r; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2891 | } |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 2892 | EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page); |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2893 | |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2894 | static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2895 | { |
Xiao Guangrong | 5e1b3dd | 2010-04-28 11:55:06 +0800 | [diff] [blame] | 2896 | trace_kvm_mmu_unsync_page(sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2897 | ++vcpu->kvm->stat.mmu_unsync; |
| 2898 | sp->unsync = 1; |
Marcelo Tosatti | 6cffe8c | 2008-12-01 22:32:04 -0200 | [diff] [blame] | 2899 | |
Xiao Guangrong | 6b18493 | 2010-04-16 21:29:17 +0800 | [diff] [blame] | 2900 | kvm_mmu_mark_parents_unsync(sp); |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2901 | } |
| 2902 | |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2903 | static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2904 | bool can_unsync) |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2905 | { |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2906 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2907 | |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2908 | if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) |
| 2909 | return true; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2910 | |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2911 | for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { |
Xiao Guangrong | 36a2e67 | 2010-06-30 16:02:02 +0800 | [diff] [blame] | 2912 | if (!can_unsync) |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2913 | return true; |
Xiao Guangrong | 36a2e67 | 2010-06-30 16:02:02 +0800 | [diff] [blame] | 2914 | |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2915 | if (sp->unsync) |
| 2916 | continue; |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 2917 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 2918 | WARN_ON(sp->role.level != PG_LEVEL_4K); |
Xiao Guangrong | 5c520e9 | 2016-02-24 17:51:15 +0800 | [diff] [blame] | 2919 | kvm_unsync_page(vcpu, sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2920 | } |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2921 | |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 2922 | /* |
| 2923 | * We need to ensure that the marking of unsync pages is visible |
| 2924 | * before the SPTE is updated to allow writes because |
| 2925 | * kvm_mmu_sync_roots() checks the unsync flags without holding |
| 2926 | * the MMU lock and so can race with this. If the SPTE was updated |
| 2927 | * before the page had been marked as unsync-ed, something like the |
| 2928 | * following could happen: |
| 2929 | * |
| 2930 | * CPU 1 CPU 2 |
| 2931 | * --------------------------------------------------------------------- |
| 2932 | * 1.2 Host updates SPTE |
| 2933 | * to be writable |
| 2934 | * 2.1 Guest writes a GPTE for GVA X. |
| 2935 | * (GPTE being in the guest page table shadowed |
| 2936 | * by the SP from CPU 1.) |
| 2937 | * This reads SPTE during the page table walk. |
| 2938 | * Since SPTE.W is read as 1, there is no |
| 2939 | * fault. |
| 2940 | * |
| 2941 | * 2.2 Guest issues TLB flush. |
| 2942 | * That causes a VM Exit. |
| 2943 | * |
| 2944 | * 2.3 kvm_mmu_sync_pages() reads sp->unsync. |
| 2945 | *     Since it is false, it just returns. |
| 2946 | * |
| 2947 | * 2.4 Guest accesses GVA X. |
| 2948 | * Since the mapping in the SP was not updated, |
| 2949 | *     the old mapping for GVA X incorrectly |
| 2950 | * gets used. |
| 2951 | * 1.1 Host marks SP |
| 2952 | * as unsync |
| 2953 | * (sp->unsync = true) |
| 2954 | * |
| 2955 | * The write barrier below ensures that 1.1 happens before 1.2 and thus |
| 2956 | * the situation in 2.4 does not arise. The implicit barrier in 2.2 |
| 2957 | * pairs with this write barrier. |
| 2958 | */ |
| 2959 | smp_wmb(); |
| 2960 | |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 2961 | return false; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 2962 | } |
| 2963 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2964 | static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) |
Paolo Bonzini | d1fe921 | 2015-07-07 15:03:18 +0200 | [diff] [blame] | 2965 | { |
| 2966 | if (pfn_valid(pfn)) |
Haozhong Zhang | aa2e063 | 2017-12-20 15:29:29 +0800 | [diff] [blame] | 2967 | return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) && |
| 2968 | /* |
| 2969 | * Some reserved pages, such as those from NVDIMM |
| 2970 | * DAX devices, are not for MMIO, and can be mapped |
| 2971 | * with cached memory type for better performance. |
| 2972 | * However, the above check misidentifies those pages |
| 2973 | * as MMIO, and results in KVM mapping them with UC |
| 2974 | * memory type, which would hurt performance. |
| 2975 | * Therefore, we check the host memory type in addition |
| 2976 | * and only treat UC/UC-/WC pages as MMIO. |
| 2977 | */ |
| 2978 | (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn)); |
Paolo Bonzini | d1fe921 | 2015-07-07 15:03:18 +0200 | [diff] [blame] | 2979 | |
KarimAllah Ahmed | 0c55671 | 2019-01-31 21:24:44 +0100 | [diff] [blame] | 2980 | return !e820__mapped_raw_any(pfn_to_hpa(pfn), |
| 2981 | pfn_to_hpa(pfn + 1) - 1, |
| 2982 | E820_TYPE_RAM); |
Paolo Bonzini | d1fe921 | 2015-07-07 15:03:18 +0200 | [diff] [blame] | 2983 | } |
| 2984 | |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 2985 | /* Bits which may be returned by set_spte() */ |
| 2986 | #define SET_SPTE_WRITE_PROTECTED_PT BIT(0) |
| 2987 | #define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1) |
Sean Christopherson | 1270375 | 2020-09-23 15:04:25 -0700 | [diff] [blame] | 2988 | #define SET_SPTE_SPURIOUS BIT(2) |
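| | /* |
| |  * SET_SPTE_WRITE_PROTECTED_PT: the gfn had to remain write-protected, |
| |  * so a write fault may need to be emulated. |
| |  * SET_SPTE_NEED_REMOTE_TLB_FLUSH: an existing SPTE was changed and |
| |  * other vCPUs' TLBs must be flushed. |
| |  * SET_SPTE_SPURIOUS: the new SPTE is identical to the old one. |
| |  */ |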
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 2989 | |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2990 | static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 2991 | unsigned int pte_access, int level, |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 2992 | gfn_t gfn, kvm_pfn_t pfn, bool speculative, |
Lai Jiangshan | 9bdbba1 | 2010-11-19 17:03:22 +0800 | [diff] [blame] | 2993 | bool can_unsync, bool host_writable) |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 2994 | { |
Bandan Das | ffb128c | 2016-07-12 18:18:49 -0400 | [diff] [blame] | 2995 | u64 spte = 0; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2996 | int ret = 0; |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 2997 | struct kvm_mmu_page *sp; |
Sheng Yang | 64d4d52 | 2008-10-09 16:01:57 +0800 | [diff] [blame] | 2998 | |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 2999 | if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access)) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3000 | return 0; |
| 3001 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 3002 | sp = sptep_to_sp(sptep); |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3003 | if (sp_ad_disabled(sp)) |
Paolo Bonzini | 6eeb4ef | 2019-09-24 12:43:08 +0200 | [diff] [blame] | 3004 | spte |= SPTE_AD_DISABLED_MASK; |
Paolo Bonzini | 1f4e5fc | 2019-09-26 18:47:59 +0200 | [diff] [blame] | 3005 | else if (kvm_vcpu_ad_need_write_protect(vcpu)) |
| 3006 | spte |= SPTE_AD_WRPROT_ONLY_MASK; |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3007 | |
Bandan Das | d95c556 | 2016-07-12 18:18:51 -0400 | [diff] [blame] | 3008 | /* |
| 3009 | * For the EPT case, shadow_present_mask is 0 if hardware |
| 3010 | * supports exec-only page table entries. In that case, |
| 3011 | * ACC_USER_MASK and shadow_user_mask are used to represent |
| 3012 | * read access. See FNAME(gpte_access) in paging_tmpl.h. |
| 3013 | */ |
Bandan Das | ffb128c | 2016-07-12 18:18:49 -0400 | [diff] [blame] | 3014 | spte |= shadow_present_mask; |
Avi Kivity | 947da53 | 2008-03-18 11:05:52 +0200 | [diff] [blame] | 3015 | if (!speculative) |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3016 | spte |= spte_shadow_accessed_mask(spte); |
Xiao Guangrong | 640d9b0 | 2011-07-12 03:24:39 +0800 | [diff] [blame] | 3017 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3018 | if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) && |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 3019 | is_nx_huge_page_enabled()) { |
| 3020 | pte_access &= ~ACC_EXEC_MASK; |
| 3021 | } |
| 3022 | |
Sheng Yang | 7b52345 | 2008-04-25 21:13:50 +0800 | [diff] [blame] | 3023 | if (pte_access & ACC_EXEC_MASK) |
| 3024 | spte |= shadow_x_mask; |
| 3025 | else |
| 3026 | spte |= shadow_nx_mask; |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 3027 | |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3028 | if (pte_access & ACC_USER_MASK) |
Sheng Yang | 7b52345 | 2008-04-25 21:13:50 +0800 | [diff] [blame] | 3029 | spte |= shadow_user_mask; |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 3030 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3031 | if (level > PG_LEVEL_4K) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 3032 | spte |= PT_PAGE_SIZE_MASK; |
Avi Kivity | b0bc3ee | 2010-09-13 16:45:28 +0200 | [diff] [blame] | 3033 | if (tdp_enabled) |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 3034 | spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn, |
Paolo Bonzini | d1fe921 | 2015-07-07 15:03:18 +0200 | [diff] [blame] | 3035 | kvm_is_mmio_pfn(pfn)); |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3036 | |
Lai Jiangshan | 9bdbba1 | 2010-11-19 17:03:22 +0800 | [diff] [blame] | 3037 | if (host_writable) |
Izik Eidus | 1403283 | 2009-09-23 21:47:17 +0300 | [diff] [blame] | 3038 | spte |= SPTE_HOST_WRITEABLE; |
Xiao Guangrong | f8e453b | 2010-12-23 16:09:29 +0800 | [diff] [blame] | 3039 | else |
| 3040 | pte_access &= ~ACC_WRITE_MASK; |
Izik Eidus | 1403283 | 2009-09-23 21:47:17 +0300 | [diff] [blame] | 3041 | |
Tom Lendacky | daaf216 | 2018-03-08 17:17:31 -0600 | [diff] [blame] | 3042 | if (!kvm_is_mmio_pfn(pfn)) |
| 3043 | spte |= shadow_me_mask; |
| 3044 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 3045 | spte |= (u64)pfn << PAGE_SHIFT; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3046 | |
Xiao Guangrong | c2288505 | 2013-01-08 14:36:04 +0800 | [diff] [blame] | 3047 | if (pte_access & ACC_WRITE_MASK) { |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 3048 | spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3049 | |
Marcelo Tosatti | ecc5589 | 2008-11-25 15:58:07 +0100 | [diff] [blame] | 3050 | /* |
| 3051 | * Optimization: for pte sync, if spte was writable the hash |
| 3052 | * lookup is unnecessary (and expensive). Write protection |
| 3053 | * is the responsibility of mmu_get_page / kvm_sync_page. |
| 3054 | * The same reasoning applies to dirty page accounting. |
| 3055 | */ |
Takuya Yoshikawa | 8dae444 | 2010-01-18 18:45:10 +0900 | [diff] [blame] | 3056 | if (!can_unsync && is_writable_pte(*sptep)) |
Marcelo Tosatti | ecc5589 | 2008-11-25 15:58:07 +0100 | [diff] [blame] | 3057 | goto set_pte; |
| 3058 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 3059 | if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { |
Xiao Guangrong | 9ad17b10 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 3060 | pgprintk("%s: found shadow page for %llx, marking ro\n", |
Harvey Harrison | b8688d5 | 2008-03-03 12:59:56 -0800 | [diff] [blame] | 3061 | __func__, gfn); |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 3062 | ret |= SET_SPTE_WRITE_PROTECTED_PT; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3063 | pte_access &= ~ACC_WRITE_MASK; |
Xiao Guangrong | 49fde34 | 2012-06-20 15:58:58 +0800 | [diff] [blame] | 3064 | spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3065 | } |
| 3066 | } |
| 3067 | |
Kai Huang | 9b51a63 | 2015-01-28 10:54:25 +0800 | [diff] [blame] | 3068 | if (pte_access & ACC_WRITE_MASK) { |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 3069 | kvm_vcpu_mark_page_dirty(vcpu, gfn); |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3070 | spte |= spte_shadow_dirty_mask(spte); |
Kai Huang | 9b51a63 | 2015-01-28 10:54:25 +0800 | [diff] [blame] | 3071 | } |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3072 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3073 | if (speculative) |
| 3074 | spte = mark_spte_for_access_track(spte); |
| 3075 | |
Marcelo Tosatti | 38187c8 | 2008-09-23 13:18:32 -0300 | [diff] [blame] | 3076 | set_pte: |
Sean Christopherson | 1270375 | 2020-09-23 15:04:25 -0700 | [diff] [blame] | 3077 | if (*sptep == spte) |
| 3078 | ret |= SET_SPTE_SPURIOUS; |
| 3079 | else if (mmu_spte_update(sptep, spte)) |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 3080 | ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3081 | return ret; |
| 3082 | } |
| 3083 | |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 3084 | static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
Sean Christopherson | e88b809 | 2020-09-23 11:37:35 -0700 | [diff] [blame] | 3085 | unsigned int pte_access, bool write_fault, int level, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 3086 | gfn_t gfn, kvm_pfn_t pfn, bool speculative, |
| 3087 | bool host_writable) |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3088 | { |
| 3089 | int was_rmapped = 0; |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 3090 | int rmap_count; |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 3091 | int set_spte_ret; |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3092 | int ret = RET_PF_FIXED; |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 3093 | bool flush = false; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3094 | |
Xiao Guangrong | f761620 | 2013-02-05 15:27:27 +0800 | [diff] [blame] | 3095 | pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, |
| 3096 | *sptep, write_fault, gfn); |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3097 | |
Takuya Yoshikawa | afd28fe | 2015-11-20 17:44:55 +0900 | [diff] [blame] | 3098 | if (is_shadow_present_pte(*sptep)) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3099 | /* |
| 3100 | * If we overwrite a PTE page pointer with a 2MB PMD, unlink |
| 3101 | * the parent of the now unreachable PTE. |
| 3102 | */ |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3103 | if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3104 | struct kvm_mmu_page *child; |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 3105 | u64 pte = *sptep; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3106 | |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 3107 | child = to_shadow_page(pte & PT64_BASE_ADDR_MASK); |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 3108 | drop_parent_pte(child, sptep); |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 3109 | flush = true; |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 3110 | } else if (pfn != spte_to_pfn(*sptep)) { |
Xiao Guangrong | 9ad17b10 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 3111 | pgprintk("hfn old %llx new %llx\n", |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 3112 | spte_to_pfn(*sptep), pfn); |
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 3113 | drop_spte(vcpu->kvm, sptep); |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 3114 | flush = true; |
Joerg Roedel | 6bed6b9 | 2009-02-18 14:08:59 +0100 | [diff] [blame] | 3115 | } else |
| 3116 | was_rmapped = 1; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3117 | } |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 3118 | |
Junaid Shahid | 5ce4786 | 2018-06-27 14:59:04 -0700 | [diff] [blame] | 3119 | set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn, |
| 3120 | speculative, true, host_writable); |
| 3121 | if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3122 | if (write_fault) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3123 | ret = RET_PF_EMULATE; |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 3124 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Marcelo Tosatti | a378b4e | 2008-09-23 13:18:31 -0300 | [diff] [blame] | 3125 | } |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 3126 | |
Tianyu Lan | c2a4ead | 2018-07-24 08:17:07 +0000 | [diff] [blame] | 3127 | if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush) |
Lan Tianyu | c3134ce | 2018-12-06 21:21:09 +0800 | [diff] [blame] | 3128 | kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, |
| 3129 | KVM_PAGES_PER_HPAGE(level)); |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 3130 | |
Takuya Yoshikawa | 029499b | 2015-11-20 17:44:05 +0900 | [diff] [blame] | 3131 | if (unlikely(is_mmio_spte(*sptep))) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3132 | ret = RET_PF_EMULATE; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3133 | |
Sean Christopherson | 1270375 | 2020-09-23 15:04:25 -0700 | [diff] [blame] | 3134 | /* |
| 3135 | * The fault is fully spurious if and only if the new SPTE and old SPTE |
| 3136 | * are identical, and emulation is not required. |
| 3137 | */ |
| 3138 | if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) { |
| 3139 | WARN_ON_ONCE(!was_rmapped); |
| 3140 | return RET_PF_SPURIOUS; |
| 3141 | } |
| 3142 | |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 3143 | pgprintk("%s: setting spte %llx\n", __func__, *sptep); |
Paolo Bonzini | 335e192 | 2019-07-01 06:22:57 -0400 | [diff] [blame] | 3144 | trace_kvm_mmu_set_spte(level, gfn, sptep); |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 3145 | if (!was_rmapped && is_large_pte(*sptep)) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 3146 | ++vcpu->kvm->stat.lpages; |
| 3147 | |
Xiao Guangrong | ffb61bb | 2011-07-12 03:22:01 +0800 | [diff] [blame] | 3148 | if (is_shadow_present_pte(*sptep)) { |
Xiao Guangrong | ffb61bb | 2011-07-12 03:22:01 +0800 | [diff] [blame] | 3149 | if (!was_rmapped) { |
| 3150 | rmap_count = rmap_add(vcpu, sptep, gfn); |
| 3151 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) |
| 3152 | rmap_recycle(vcpu, sptep, gfn); |
| 3153 | } |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3154 | } |
Xiao Guangrong | cb9aaa3 | 2012-08-03 15:42:10 +0800 | [diff] [blame] | 3155 | |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3156 | return ret; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 3157 | } |
| 3158 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 3159 | static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3160 | bool no_dirty_log) |
| 3161 | { |
| 3162 | struct kvm_memory_slot *slot; |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3163 | |
Xiao Guangrong | 5d163b1 | 2011-03-09 15:43:00 +0800 | [diff] [blame] | 3164 | slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); |
Xiao Guangrong | 903816f | 2012-07-17 21:54:11 +0800 | [diff] [blame] | 3165 | if (!slot) |
Xiao Guangrong | 6c8ee57 | 2012-08-03 15:37:54 +0800 | [diff] [blame] | 3166 | return KVM_PFN_ERR_FAULT; |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3167 | |
Xiao Guangrong | 037d92d | 2012-08-21 10:59:12 +0800 | [diff] [blame] | 3168 | return gfn_to_pfn_memslot_atomic(slot, gfn); |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3169 | } |
| 3170 | |
| 3171 | static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, |
| 3172 | struct kvm_mmu_page *sp, |
| 3173 | u64 *start, u64 *end) |
| 3174 | { |
| 3175 | struct page *pages[PTE_PREFETCH_NUM]; |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 3176 | struct kvm_memory_slot *slot; |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 3177 | unsigned int access = sp->role.access; |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3178 | int i, ret; |
| 3179 | gfn_t gfn; |
| 3180 | |
| 3181 | gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 3182 | slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK); |
| 3183 | if (!slot) |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3184 | return -1; |
| 3185 | |
Paolo Bonzini | d9ef13c | 2015-05-19 16:01:50 +0200 | [diff] [blame] | 3186 | ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start); |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3187 | if (ret <= 0) |
| 3188 | return -1; |
| 3189 | |
Junaid Shahid | 43fdcda | 2019-01-03 16:22:21 -0800 | [diff] [blame] | 3190 | for (i = 0; i < ret; i++, gfn++, start++) { |
Sean Christopherson | e88b809 | 2020-09-23 11:37:35 -0700 | [diff] [blame] | 3191 | mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn, |
Takuya Yoshikawa | 029499b | 2015-11-20 17:44:05 +0900 | [diff] [blame] | 3192 | page_to_pfn(pages[i]), true, true); |
Junaid Shahid | 43fdcda | 2019-01-03 16:22:21 -0800 | [diff] [blame] | 3193 | put_page(pages[i]); |
| 3194 | } |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3195 | |
| 3196 | return 0; |
| 3197 | } |
| 3198 | |
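| | /* |
| |  * Speculatively populate the PTE_PREFETCH_NUM-aligned window of sptes |
| |  * around @sptep, filling runs of not-present entries whose backing |
| |  * pages can be resolved without sleeping. |
| |  */ |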
| 3199 | static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, |
| 3200 | struct kvm_mmu_page *sp, u64 *sptep) |
| 3201 | { |
| 3202 | u64 *spte, *start = NULL; |
| 3203 | int i; |
| 3204 | |
| 3205 | WARN_ON(!sp->role.direct); |
| 3206 | |
| 3207 | i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); |
| 3208 | spte = sp->spt + i; |
| 3209 | |
| 3210 | for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { |
Xiao Guangrong | c370795 | 2011-07-12 03:28:04 +0800 | [diff] [blame] | 3211 | if (is_shadow_present_pte(*spte) || spte == sptep) { |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3212 | if (!start) |
| 3213 | continue; |
| 3214 | if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) |
| 3215 | break; |
| 3216 | start = NULL; |
| 3217 | } else if (!start) |
| 3218 | start = spte; |
| 3219 | } |
| 3220 | } |
| 3221 | |
| 3222 | static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) |
| 3223 | { |
| 3224 | struct kvm_mmu_page *sp; |
| 3225 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 3226 | sp = sptep_to_sp(sptep); |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3227 | |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3228 | /* |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3229 | * Without accessed bits, there's no way to distinguish between |
| 3230 | * actually accessed translations and prefetched ones, so disable pte |
| 3231 | * prefetch if accessed bits aren't available. |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3232 | */ |
Peter Feiner | ac8d57e | 2017-06-30 17:26:31 -0700 | [diff] [blame] | 3233 | if (sp_ad_disabled(sp)) |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3234 | return; |
| 3235 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3236 | if (sp->role.level > PG_LEVEL_4K) |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 3237 | return; |
| 3238 | |
| 3239 | __direct_pte_prefetch(vcpu, sp, sptep); |
| 3240 | } |
| 3241 | |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3242 | static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3243 | kvm_pfn_t pfn, struct kvm_memory_slot *slot) |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3244 | { |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3245 | unsigned long hva; |
| 3246 | pte_t *pte; |
| 3247 | int level; |
| 3248 | |
Sean Christopherson | e851265 | 2020-01-08 12:24:48 -0800 | [diff] [blame] | 3249 | if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn)) |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3250 | return PG_LEVEL_4K; |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3251 | |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3252 | /* |
| 3253 | * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() |
| 3254 | * is not solely for performance, it's also necessary to avoid the |
| 3255 | * "writable" check in __gfn_to_hva_many(), which will always fail on |
| 3256 | * read-only memslots due to gfn_to_hva() assuming writes. Earlier |
| 3257 | * page fault steps have already verified the guest isn't writing a |
| 3258 | * read-only memslot. |
| 3259 | */ |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3260 | hva = __gfn_to_hva_memslot(slot, gfn); |
| 3261 | |
| 3262 | pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level); |
| 3263 | if (unlikely(!pte)) |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3264 | return PG_LEVEL_4K; |
Sean Christopherson | db54321 | 2020-01-08 12:24:41 -0800 | [diff] [blame] | 3265 | |
| 3266 | return level; |
| 3267 | } |
| 3268 | |
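| | /* |
| |  * Returns the largest page level at which @gfn/@pfn may be mapped, capped |
| |  * by @max_level, the memslot's disallow_lpage tracking and the level of |
| |  * the host mapping. *@req_level records that level before the iTLB |
| |  * multihit (NX huge page) workaround is applied; when a huge page is |
| |  * used, *@pfnp is aligned down to the start of that page. |
| |  */ |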
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3269 | static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, |
Sean Christopherson | 3cf0661 | 2020-09-23 11:37:31 -0700 | [diff] [blame] | 3270 | int max_level, kvm_pfn_t *pfnp, |
| 3271 | bool huge_page_disallowed, int *req_level) |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 3272 | { |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3273 | struct kvm_memory_slot *slot; |
Sean Christopherson | 2c0629f | 2020-01-08 12:24:47 -0800 | [diff] [blame] | 3274 | struct kvm_lpage_info *linfo; |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 3275 | kvm_pfn_t pfn = *pfnp; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3276 | kvm_pfn_t mask; |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3277 | int level; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3278 | |
Sean Christopherson | 3cf0661 | 2020-09-23 11:37:31 -0700 | [diff] [blame] | 3279 | *req_level = PG_LEVEL_4K; |
| 3280 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3281 | if (unlikely(max_level == PG_LEVEL_4K)) |
| 3282 | return PG_LEVEL_4K; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3283 | |
Sean Christopherson | e851265 | 2020-01-08 12:24:48 -0800 | [diff] [blame] | 3284 | if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn)) |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3285 | return PG_LEVEL_4K; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3286 | |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3287 | slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true); |
| 3288 | if (!slot) |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3289 | return PG_LEVEL_4K; |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3290 | |
Sean Christopherson | 1d92d2e | 2020-07-15 20:41:21 -0700 | [diff] [blame] | 3291 | max_level = min(max_level, max_huge_page_level); |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3292 | for ( ; max_level > PG_LEVEL_4K; max_level--) { |
Sean Christopherson | 2c0629f | 2020-01-08 12:24:47 -0800 | [diff] [blame] | 3293 | linfo = lpage_info_slot(gfn, slot, max_level); |
| 3294 | if (!linfo->disallow_lpage) |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3295 | break; |
| 3296 | } |
| 3297 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3298 | if (max_level == PG_LEVEL_4K) |
| 3299 | return PG_LEVEL_4K; |
Sean Christopherson | 293e306 | 2020-01-08 12:24:46 -0800 | [diff] [blame] | 3300 | |
| 3301 | level = host_pfn_mapping_level(vcpu, gfn, pfn, slot); |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3302 | if (level == PG_LEVEL_4K) |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3303 | return level; |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3304 | |
Sean Christopherson | 3cf0661 | 2020-09-23 11:37:31 -0700 | [diff] [blame] | 3305 | *req_level = level = min(level, max_level); |
| 3306 | |
| 3307 | /* |
| 3308 | * Enforce the iTLB multihit workaround after capturing the requested |
| 3309 | * level, which will be used to do precise, accurate accounting. |
| 3310 | */ |
| 3311 | if (huge_page_disallowed) |
| 3312 | return PG_LEVEL_4K; |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 3313 | |
| 3314 | /* |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3315 | * mmu_notifier_retry() was successful and mmu_lock is held, so |
| 3316 | * the pmd can't be split from under us. |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 3317 | */ |
Sean Christopherson | 17eff01 | 2020-01-08 12:24:40 -0800 | [diff] [blame] | 3318 | mask = KVM_PAGES_PER_HPAGE(level) - 1; |
| 3319 | VM_BUG_ON((gfn & mask) != (pfn & mask)); |
| 3320 | *pfnp = pfn & ~mask; |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3321 | |
| 3322 | return level; |
Sean Christopherson | 0885904 | 2019-12-06 15:57:25 -0800 | [diff] [blame] | 3323 | } |
| 3324 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 3325 | static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it, |
| 3326 | gfn_t gfn, kvm_pfn_t *pfnp, int *levelp) |
| 3327 | { |
| 3328 | int level = *levelp; |
| 3329 | u64 spte = *it.sptep; |
| 3330 | |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3331 | if (it.level == level && level > PG_LEVEL_4K && |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 3332 | is_shadow_present_pte(spte) && |
| 3333 | !is_large_pte(spte)) { |
| 3334 | /* |
| 3335 | * A small SPTE exists for this pfn, but FNAME(fetch) |
| 3336 | * and __direct_map would like to create a large PTE |
| 3337 | * instead: just force them to go down another level, |
| 3338 | * patching back for them into pfn the next 9 bits of |
| 3339 | * patching the next 9 bits of the address back |
| 3340 | * into pfn for them. |
| 3341 | u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1); |
| 3342 | *pfnp |= gfn & page_mask; |
| 3343 | (*levelp)--; |
| 3344 | } |
| 3345 | } |
| 3346 | |
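| | /* |
| |  * Map @gpa in a direct (TDP or non-paging) MMU: walk the shadow page |
| |  * table for @gpa, allocating and linking intermediate shadow pages as |
| |  * needed, then install the leaf SPTE via mmu_set_spte(). Returns a |
| |  * RET_PF_* value. |
| |  */ |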
Sean Christopherson | 6c2fd34 | 2020-09-23 11:37:30 -0700 | [diff] [blame] | 3347 | static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 3348 | int map_writable, int max_level, kvm_pfn_t pfn, |
Sean Christopherson | 6c2fd34 | 2020-09-23 11:37:30 -0700 | [diff] [blame] | 3349 | bool prefault, bool is_tdp) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3350 | { |
Sean Christopherson | 6c2fd34 | 2020-09-23 11:37:30 -0700 | [diff] [blame] | 3351 | bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(); |
| 3352 | bool write = error_code & PFERR_WRITE_MASK; |
| 3353 | bool exec = error_code & PFERR_FETCH_MASK; |
| 3354 | bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled; |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3355 | struct kvm_shadow_walk_iterator it; |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 3356 | struct kvm_mmu_page *sp; |
Sean Christopherson | 3cf0661 | 2020-09-23 11:37:31 -0700 | [diff] [blame] | 3357 | int level, req_level, ret; |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3358 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 3359 | gfn_t base_gfn = gfn; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3360 | |
Sean Christopherson | 0c7a98e | 2019-12-06 15:57:28 -0800 | [diff] [blame] | 3361 | if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa))) |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3362 | return RET_PF_RETRY; |
Marcelo Tosatti | 989c6b3 | 2013-12-19 15:28:51 -0200 | [diff] [blame] | 3363 | |
Sean Christopherson | 3cf0661 | 2020-09-23 11:37:31 -0700 | [diff] [blame] | 3364 | level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn, |
| 3365 | huge_page_disallowed, &req_level); |
Sean Christopherson | 4cd071d | 2019-12-06 15:57:26 -0800 | [diff] [blame] | 3366 | |
Paolo Bonzini | 335e192 | 2019-07-01 06:22:57 -0400 | [diff] [blame] | 3367 | trace_kvm_mmu_spte_requested(gpa, level, pfn); |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3368 | for_each_shadow_entry(vcpu, gpa, it) { |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 3369 | /* |
| 3370 | * We cannot overwrite existing page tables with an NX |
| 3371 | * large page, as the leaf could be executable. |
| 3372 | */ |
Sean Christopherson | dcc7065 | 2020-09-23 11:37:34 -0700 | [diff] [blame] | 3373 | if (nx_huge_page_workaround_enabled) |
| 3374 | disallowed_hugepage_adjust(it, gfn, &pfn, &level); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 3375 | |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3376 | base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); |
| 3377 | if (it.level == level) |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 3378 | break; |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 3379 | |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3380 | drop_large_spte(vcpu, it.sptep); |
| 3381 | if (!is_shadow_present_pte(*it.sptep)) { |
| 3382 | sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr, |
| 3383 | it.level - 1, true, ACC_ALL); |
Lai Jiangshan | c9fa0b3 | 2010-05-26 16:48:25 +0800 | [diff] [blame] | 3384 | |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3385 | link_shadow_page(vcpu, it.sptep, sp); |
Sean Christopherson | 5bcaf3e | 2020-09-23 11:37:32 -0700 | [diff] [blame] | 3386 | if (is_tdp && huge_page_disallowed && |
| 3387 | req_level >= it.level) |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 3388 | account_huge_nx_page(vcpu->kvm, sp); |
Avi Kivity | 9f652d21 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 3389 | } |
| 3390 | } |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3391 | |
| 3392 | ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, |
| 3393 | write, level, base_gfn, pfn, prefault, |
| 3394 | map_writable); |
Sean Christopherson | 1270375 | 2020-09-23 15:04:25 -0700 | [diff] [blame] | 3395 | if (ret == RET_PF_SPURIOUS) |
| 3396 | return ret; |
| 3397 | |
Paolo Bonzini | 3fcf2d1 | 2019-06-24 13:06:21 +0200 | [diff] [blame] | 3398 | direct_pte_prefetch(vcpu, it.sptep); |
| 3399 | ++vcpu->stat.pf_fixed; |
| 3400 | return ret; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3401 | } |
| 3402 | |
Huang Ying | 77db5cb | 2010-10-08 16:24:15 +0800 | [diff] [blame] | 3403 | static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 3404 | { |
Eric W. Biederman | 585a8b9 | 2018-04-16 14:23:27 -0500 | [diff] [blame] | 3405 | send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk); |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 3406 | } |
| 3407 | |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 3408 | static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 3409 | { |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 3410 | /* |
 | 3411 | 	 * Do not cache the mmio info caused by writing the readonly gfn
 | 3412 | 	 * into the spte; otherwise a read access on the readonly gfn can
 | 3413 | 	 * also cause an mmio page fault and be treated as an mmio access.
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 3414 | */ |
| 3415 | if (pfn == KVM_PFN_ERR_RO_FAULT) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3416 | return RET_PF_EMULATE; |
Xiao Guangrong | 4d8b81a | 2012-08-21 11:02:51 +0800 | [diff] [blame] | 3417 | |
Xiao Guangrong | e6c1502 | 2012-08-03 15:38:36 +0800 | [diff] [blame] | 3418 | if (pfn == KVM_PFN_ERR_HWPOISON) { |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 3419 | kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 3420 | return RET_PF_RETRY; |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3421 | } |
Gleb Natapov | edba23e | 2010-07-07 20:16:45 +0300 | [diff] [blame] | 3422 | |
Sean Christopherson | 2c151b2 | 2018-03-29 14:48:30 -0700 | [diff] [blame] | 3423 | return -EFAULT; |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 3424 | } |
| 3425 | |
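 | | /*
 | |  * Returns true if the fault was fully handled here, i.e. the pfn is an
 | |  * error pfn and *ret_val holds the result of kvm_handle_bad_page().  A pfn
 | |  * without a memslot only has its MMIO info cached; the caller still
 | |  * handles the fault itself.
 | |  */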
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3426 | static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 3427 | kvm_pfn_t pfn, unsigned int access, |
| 3428 | int *ret_val) |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3429 | { |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3430 | /* The pfn is invalid, report the error! */ |
Xiao Guangrong | 81c52c5 | 2012-10-16 20:10:59 +0800 | [diff] [blame] | 3431 | if (unlikely(is_error_pfn(pfn))) { |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3432 | *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); |
Paolo Bonzini | 798e88b | 2016-02-23 15:28:51 +0100 | [diff] [blame] | 3433 | return true; |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3434 | } |
| 3435 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3436 | if (unlikely(is_noslot_pfn(pfn))) |
Sean Christopherson | 4af7715 | 2019-08-01 13:35:22 -0700 | [diff] [blame] | 3437 | vcpu_cache_mmio_info(vcpu, gva, gfn, |
| 3438 | access & shadow_mmio_access_mask); |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3439 | |
Paolo Bonzini | 798e88b | 2016-02-23 15:28:51 +0100 | [diff] [blame] | 3440 | return false; |
Xiao Guangrong | d7c5520 | 2011-07-12 03:29:38 +0800 | [diff] [blame] | 3441 | } |
| 3442 | |
Xiao Guangrong | e5552fd | 2013-07-30 21:01:59 +0800 | [diff] [blame] | 3443 | static bool page_fault_can_be_fast(u32 error_code) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3444 | { |
| 3445 | /* |
Xiao Guangrong | 1c118b8 | 2013-07-18 12:52:37 +0800 | [diff] [blame] | 3446 | 	 * Do not fix an mmio spte with an invalid generation number; it
 | 3447 | 	 * needs to be updated by the slow page fault path.
| 3448 | */ |
| 3449 | if (unlikely(error_code & PFERR_RSVD_MASK)) |
| 3450 | return false; |
| 3451 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3452 | /* See if the page fault is due to an NX violation */ |
| 3453 | if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)) |
| 3454 | == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK)))) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3455 | return false; |
| 3456 | |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3457 | /* |
| 3458 | * #PF can be fast if: |
| 3459 | * 1. The shadow page table entry is not present, which could mean that |
| 3460 | * the fault is potentially caused by access tracking (if enabled). |
 | 3461 | 	 * 2. The shadow page table entry is present and the fault is caused
 | 3462 | 	 *    by write-protect; in that case we just need to change the W bit
 | 3463 | 	 *    of the spte, which can be done outside of mmu_lock.
| 3464 | * |
| 3465 | * However, if access tracking is disabled we know that a non-present |
| 3466 | * page must be a genuine page fault where we have to create a new SPTE. |
| 3467 | * So, if access tracking is disabled, we return true only for write |
| 3468 | * accesses to a present page. |
| 3469 | */ |
| 3470 | |
| 3471 | return shadow_acc_track_mask != 0 || |
| 3472 | ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)) |
| 3473 | == (PFERR_WRITE_MASK | PFERR_PRESENT_MASK)); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3474 | } |
| 3475 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3476 | /* |
| 3477 | * Returns true if the SPTE was fixed successfully. Otherwise, |
| 3478 | * someone else modified the SPTE from its original value. |
| 3479 | */ |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3480 | static bool |
Xiao Guangrong | 92a476c | 2014-04-17 17:06:13 +0800 | [diff] [blame] | 3481 | fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3482 | u64 *sptep, u64 old_spte, u64 new_spte) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3483 | { |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3484 | gfn_t gfn; |
| 3485 | |
| 3486 | WARN_ON(!sp->role.direct); |
| 3487 | |
Kai Huang | 9b51a63 | 2015-01-28 10:54:25 +0800 | [diff] [blame] | 3488 | /* |
| 3489 | * Theoretically we could also set dirty bit (and flush TLB) here in |
| 3490 | * order to eliminate unnecessary PML logging. See comments in |
| 3491 | * set_spte. But fast_page_fault is very unlikely to happen with PML |
 | 3492 | 	 * enabled, so we do not do this. This might result in the same GPA
 | 3493 | 	 * being logged in the PML buffer again when the write really happens,
 | 3494 | 	 * and in mark_page_dirty eventually being called twice, but that is
 | 3495 | 	 * harmless. This also avoids the TLB flush needed after setting dirty bit
| 3496 | * so non-PML cases won't be impacted. |
| 3497 | * |
| 3498 | * Compare with set_spte where instead shadow_dirty_mask is set. |
| 3499 | */ |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3500 | if (cmpxchg64(sptep, old_spte, new_spte) != old_spte) |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3501 | return false; |
| 3502 | |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3503 | if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) { |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3504 | /* |
 | 3505 | 		 * The gfn of a direct spte is stable since it is
 | 3506 | 		 * calculated from sp->gfn.
| 3507 | */ |
| 3508 | gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); |
| 3509 | kvm_vcpu_mark_page_dirty(vcpu, gfn); |
| 3510 | } |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3511 | |
| 3512 | return true; |
| 3513 | } |
| 3514 | |
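 | | /*
 | |  * Returns true if the access described by the fault error code (fetch,
 | |  * write or read) is already permitted by the given SPTE.
 | |  */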
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3515 | static bool is_access_allowed(u32 fault_err_code, u64 spte) |
| 3516 | { |
| 3517 | if (fault_err_code & PFERR_FETCH_MASK) |
| 3518 | return is_executable_pte(spte); |
| 3519 | |
| 3520 | if (fault_err_code & PFERR_WRITE_MASK) |
| 3521 | return is_writable_pte(spte); |
| 3522 | |
| 3523 | /* Fault was on Read access */ |
| 3524 | return spte & PT_PRESENT_MASK; |
| 3525 | } |
| 3526 | |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3527 | /* |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3528 | * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS. |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3529 | */ |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3530 | static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, |
| 3531 | u32 error_code) |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3532 | { |
| 3533 | struct kvm_shadow_walk_iterator iterator; |
Xiao Guangrong | 92a476c | 2014-04-17 17:06:13 +0800 | [diff] [blame] | 3534 | struct kvm_mmu_page *sp; |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3535 | int ret = RET_PF_INVALID; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3536 | u64 spte = 0ull; |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3537 | uint retry_count = 0; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3538 | |
Xiao Guangrong | e5552fd | 2013-07-30 21:01:59 +0800 | [diff] [blame] | 3539 | if (!page_fault_can_be_fast(error_code)) |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3540 | return ret; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3541 | |
| 3542 | walk_shadow_page_lockless_begin(vcpu); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3543 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3544 | do { |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3545 | u64 new_spte; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3546 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3547 | for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte) |
Sean Christopherson | f9fa250 | 2020-01-08 12:24:42 -0800 | [diff] [blame] | 3548 | if (!is_shadow_present_pte(spte)) |
Junaid Shahid | d162f30 | 2016-12-21 20:29:30 -0800 | [diff] [blame] | 3549 | break; |
| 3550 | |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 3551 | sp = sptep_to_sp(iterator.sptep); |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3552 | if (!is_last_spte(spte, sp->role.level)) |
| 3553 | break; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3554 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3555 | /* |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3556 | * Check whether the memory access that caused the fault would |
| 3557 | * still cause it if it were to be performed right now. If not, |
 | 3558 | 		 * then this is a spurious fault caused by a lazily flushed TLB,
| 3559 | * or some other CPU has already fixed the PTE after the |
| 3560 | * current CPU took the fault. |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3561 | * |
| 3562 | * Need not check the access of upper level table entries since |
| 3563 | * they are always ACC_ALL. |
| 3564 | */ |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3565 | if (is_access_allowed(error_code, spte)) { |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3566 | ret = RET_PF_SPURIOUS; |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3567 | break; |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3568 | } |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3569 | |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3570 | new_spte = spte; |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 3571 | |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3572 | if (is_access_track_spte(spte)) |
| 3573 | new_spte = restore_acc_track_spte(new_spte); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3574 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3575 | /* |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3576 | * Currently, to simplify the code, write-protection can |
| 3577 | * be removed in the fast path only if the SPTE was |
| 3578 | * write-protected for dirty-logging or access tracking. |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3579 | */ |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3580 | if ((error_code & PFERR_WRITE_MASK) && |
Miaohe Lin | e630269 | 2020-02-15 10:44:22 +0800 | [diff] [blame] | 3581 | spte_can_locklessly_be_made_writable(spte)) { |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3582 | new_spte |= PT_WRITABLE_MASK; |
| 3583 | |
| 3584 | /* |
| 3585 | * Do not fix write-permission on the large spte. Since |
| 3586 | * we only dirty the first page into the dirty-bitmap in |
 | 3587 | 			 * fast_pf_fix_direct_spte(), other pages would be missed
 | 3588 | 			 * if their slot has dirty logging enabled.
| 3589 | * |
| 3590 | * Instead, we let the slow page fault path create a |
| 3591 | * normal spte to fix the access. |
| 3592 | * |
| 3593 | * See the comments in kvm_arch_commit_memory_region(). |
| 3594 | */ |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 3595 | if (sp->role.level > PG_LEVEL_4K) |
Junaid Shahid | d3e328f2 | 2016-12-21 20:29:32 -0800 | [diff] [blame] | 3596 | break; |
| 3597 | } |
| 3598 | |
| 3599 | /* Verify that the fault can be handled in the fast path */ |
| 3600 | if (new_spte == spte || |
| 3601 | !is_access_allowed(error_code, new_spte)) |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3602 | break; |
Xiao Guangrong | c126d94 | 2014-04-17 17:06:14 +0800 | [diff] [blame] | 3603 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3604 | /* |
| 3605 | * Currently, fast page fault only works for direct mapping |
 | 3606 | 		 * since the gfn is not stable for indirect shadow pages. See
Mauro Carvalho Chehab | 3ecad8c | 2020-04-14 18:48:36 +0200 | [diff] [blame] | 3607 | 		 * Documentation/virt/kvm/locking.rst for more detail.
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3608 | */ |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3609 | if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte, |
| 3610 | new_spte)) { |
| 3611 | ret = RET_PF_FIXED; |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3612 | break; |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3613 | } |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3614 | |
| 3615 | if (++retry_count > 4) { |
| 3616 | printk_once(KERN_WARNING |
| 3617 | "kvm: Fast #PF retrying more than 4 times.\n"); |
| 3618 | break; |
| 3619 | } |
| 3620 | |
Junaid Shahid | 97dceba | 2016-12-06 16:46:12 -0800 | [diff] [blame] | 3621 | } while (true); |
| 3622 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3623 | trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep, |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3624 | spte, ret); |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3625 | walk_shadow_page_lockless_end(vcpu); |
| 3626 | |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 3627 | return ret; |
Xiao Guangrong | c7ba5b4 | 2012-06-20 15:59:18 +0800 | [diff] [blame] | 3628 | } |
| 3629 | |
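 | | /*
 | |  * Drop a reference on the shadow page backing *root_hpa; if that was the
 | |  * last reference and the page is marked invalid, queue it for zapping on
 | |  * @invalid_list.  *root_hpa is reset to INVALID_PAGE.
 | |  */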
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3630 | static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, |
| 3631 | struct list_head *invalid_list) |
| 3632 | { |
| 3633 | struct kvm_mmu_page *sp; |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 3634 | |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3635 | if (!VALID_PAGE(*root_hpa)) |
| 3636 | return; |
| 3637 | |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 3638 | sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK); |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3639 | --sp->root_count; |
| 3640 | if (!sp->root_count && sp->role.invalid) |
| 3641 | kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); |
| 3642 | |
| 3643 | *root_hpa = INVALID_PAGE; |
| 3644 | } |
| 3645 | |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3646 | /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */ |
Vitaly Kuznetsov | 6a82cd1 | 2018-10-08 21:28:07 +0200 | [diff] [blame] | 3647 | void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
| 3648 | ulong roots_to_free) |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3649 | { |
Sean Christopherson | 4d710de | 2020-09-23 12:12:04 -0700 | [diff] [blame] | 3650 | struct kvm *kvm = vcpu->kvm; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3651 | int i; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3652 | LIST_HEAD(invalid_list); |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3653 | bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3654 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3655 | BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3656 | |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3657 | /* Before acquiring the MMU lock, see if we need to do any real work. */ |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3658 | if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) { |
| 3659 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 3660 | if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) && |
| 3661 | VALID_PAGE(mmu->prev_roots[i].hpa)) |
| 3662 | break; |
| 3663 | |
| 3664 | if (i == KVM_MMU_NUM_PREV_ROOTS) |
| 3665 | return; |
| 3666 | } |
Gleb Natapov | 35af577 | 2013-05-16 11:55:51 +0300 | [diff] [blame] | 3667 | |
Sean Christopherson | 4d710de | 2020-09-23 12:12:04 -0700 | [diff] [blame] | 3668 | spin_lock(&kvm->mmu_lock); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3669 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3670 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 3671 | if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) |
Sean Christopherson | 4d710de | 2020-09-23 12:12:04 -0700 | [diff] [blame] | 3672 | mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa, |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 3673 | &invalid_list); |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 3674 | |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3675 | if (free_active_root) { |
| 3676 | if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && |
| 3677 | (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { |
Sean Christopherson | 4d710de | 2020-09-23 12:12:04 -0700 | [diff] [blame] | 3678 | mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list); |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3679 | } else { |
| 3680 | for (i = 0; i < 4; ++i) |
| 3681 | if (mmu->pae_root[i] != 0) |
Sean Christopherson | 4d710de | 2020-09-23 12:12:04 -0700 | [diff] [blame] | 3682 | mmu_free_root_page(kvm, |
Junaid Shahid | 08fb59d | 2018-06-27 14:59:17 -0700 | [diff] [blame] | 3683 | &mmu->pae_root[i], |
| 3684 | &invalid_list); |
| 3685 | mmu->root_hpa = INVALID_PAGE; |
| 3686 | } |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3687 | mmu->root_pgd = 0; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3688 | } |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3689 | |
Sean Christopherson | 4d710de | 2020-09-23 12:12:04 -0700 | [diff] [blame] | 3690 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
| 3691 | spin_unlock(&kvm->mmu_lock); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3692 | } |
Junaid Shahid | 74b566e | 2018-05-04 11:37:11 -0700 | [diff] [blame] | 3693 | EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3694 | |
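 | | /*
 | |  * Sanity check a guest-supplied root gfn: if it is not backed by a visible
 | |  * memslot, request a triple fault and return nonzero.
 | |  */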
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3695 | static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) |
| 3696 | { |
| 3697 | int ret = 0; |
| 3698 | |
Vitaly Kuznetsov | 995decb | 2020-07-08 16:00:23 +0200 | [diff] [blame] | 3699 | if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) { |
Avi Kivity | a8eeb04 | 2010-05-10 12:34:53 +0300 | [diff] [blame] | 3700 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3701 | ret = 1; |
| 3702 | } |
| 3703 | |
| 3704 | return ret; |
| 3705 | } |
| 3706 | |
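 | | /*
 | |  * Allocate (or look up) the shadow page for a new root under mmu_lock and
 | |  * pin it by raising root_count.  Returns INVALID_PAGE if no MMU pages
 | |  * could be made available.
 | |  */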
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3707 | static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva, |
| 3708 | u8 level, bool direct) |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3709 | { |
| 3710 | struct kvm_mmu_page *sp; |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3711 | |
| 3712 | spin_lock(&vcpu->kvm->mmu_lock); |
| 3713 | |
| 3714 | if (make_mmu_pages_available(vcpu)) { |
| 3715 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 3716 | return INVALID_PAGE; |
| 3717 | } |
| 3718 | sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL); |
| 3719 | ++sp->root_count; |
| 3720 | |
| 3721 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 3722 | return __pa(sp->spt); |
| 3723 | } |
| 3724 | |
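 | | /*
 | |  * Build the root(s) for a direct MMU: a single root for 4-level or higher
 | |  * paging, or four PAE roots covering 1GiB of GPA space each.
 | |  */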
| 3725 | static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) |
| 3726 | { |
| 3727 | u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level; |
| 3728 | hpa_t root; |
Avi Kivity | 7ebaf15 | 2010-10-03 18:51:39 +0200 | [diff] [blame] | 3729 | unsigned i; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3730 | |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3731 | if (shadow_root_level >= PT64_ROOT_4LEVEL) { |
| 3732 | root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true); |
| 3733 | if (!VALID_PAGE(root)) |
Wanpeng Li | ed52870 | 2017-12-04 22:21:30 -0800 | [diff] [blame] | 3734 | return -ENOSPC; |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3735 | vcpu->arch.mmu->root_hpa = root; |
| 3736 | } else if (shadow_root_level == PT32E_ROOT_LEVEL) { |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3737 | for (i = 0; i < 4; ++i) { |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3738 | MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i])); |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3739 | |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3740 | root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), |
| 3741 | i << 30, PT32_ROOT_LEVEL, true); |
| 3742 | if (!VALID_PAGE(root)) |
Wanpeng Li | ed52870 | 2017-12-04 22:21:30 -0800 | [diff] [blame] | 3743 | return -ENOSPC; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3744 | vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3745 | } |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3746 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3747 | } else |
| 3748 | BUG(); |
Sean Christopherson | 3651c7f | 2020-02-28 14:52:39 -0800 | [diff] [blame] | 3749 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3750 | /* root_pgd is ignored for direct MMUs. */ |
| 3751 | vcpu->arch.mmu->root_pgd = 0; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3752 | |
| 3753 | return 0; |
| 3754 | } |
| 3755 | |
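 | | /*
 | |  * Build the root(s) used to shadow the guest's page tables: a single root
 | |  * for a long mode guest, otherwise four PAE roots (taken from the guest
 | |  * PDPTEs for a PAE guest), with an extra lm_root level interposed when
 | |  * the host shadow uses 4-level paging.
 | |  */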
| 3756 | static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3757 | { |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3758 | u64 pdptr, pm_mask; |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3759 | gfn_t root_gfn, root_pgd; |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3760 | hpa_t root; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3761 | int i; |
Avi Kivity | 3bb65a2 | 2007-01-05 16:36:51 -0800 | [diff] [blame] | 3762 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3763 | root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu); |
| 3764 | root_gfn = root_pgd >> PAGE_SHIFT; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3765 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3766 | if (mmu_check_root(vcpu, root_gfn)) |
| 3767 | return 1; |
| 3768 | |
| 3769 | /* |
| 3770 | * Do we shadow a long mode page table? If so we need to |
| 3771 | * write-protect the guests page table root. |
| 3772 | */ |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3773 | if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3774 | MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa)); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3775 | |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3776 | root = mmu_alloc_root(vcpu, root_gfn, 0, |
| 3777 | vcpu->arch.mmu->shadow_root_level, false); |
| 3778 | if (!VALID_PAGE(root)) |
Wanpeng Li | ed52870 | 2017-12-04 22:21:30 -0800 | [diff] [blame] | 3779 | return -ENOSPC; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3780 | vcpu->arch.mmu->root_hpa = root; |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3781 | goto set_root_pgd; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3782 | } |
Joerg Roedel | f87f928 | 2010-09-02 17:29:45 +0200 | [diff] [blame] | 3783 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3784 | /* |
| 3785 | * We shadow a 32 bit page table. This may be a legacy 2-level |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3786 | * or a PAE 3-level page table. In either case we need to be aware that |
| 3787 | * the shadow page table may be a PAE or a long mode page table. |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3788 | */ |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3789 | pm_mask = PT_PRESENT_MASK; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3790 | if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3791 | pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; |
| 3792 | |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3793 | for (i = 0; i < 4; ++i) { |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3794 | MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i])); |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3795 | if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) { |
| 3796 | pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i); |
Bandan Das | 812f30b | 2016-07-12 18:18:50 -0400 | [diff] [blame] | 3797 | if (!(pdptr & PT_PRESENT_MASK)) { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3798 | vcpu->arch.mmu->pae_root[i] = 0; |
Avi Kivity | 417726a | 2007-04-12 17:35:58 +0300 | [diff] [blame] | 3799 | continue; |
| 3800 | } |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 3801 | root_gfn = pdptr >> PAGE_SHIFT; |
Joerg Roedel | f87f928 | 2010-09-02 17:29:45 +0200 | [diff] [blame] | 3802 | if (mmu_check_root(vcpu, root_gfn)) |
| 3803 | return 1; |
Eric Northup | 5a7388c | 2010-04-26 17:00:05 -0700 | [diff] [blame] | 3804 | } |
Avi Kivity | 8facbbf | 2010-05-04 12:58:32 +0300 | [diff] [blame] | 3805 | |
Sean Christopherson | 8123f26 | 2020-04-27 19:37:14 -0700 | [diff] [blame] | 3806 | root = mmu_alloc_root(vcpu, root_gfn, i << 30, |
| 3807 | PT32_ROOT_LEVEL, false); |
| 3808 | if (!VALID_PAGE(root)) |
| 3809 | return -ENOSPC; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3810 | vcpu->arch.mmu->pae_root[i] = root | pm_mask; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3811 | } |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3812 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3813 | |
| 3814 | /* |
| 3815 | * If we shadow a 32 bit page table with a long mode page |
| 3816 | * table we enter this path. |
| 3817 | */ |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3818 | if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) { |
| 3819 | if (vcpu->arch.mmu->lm_root == NULL) { |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3820 | /* |
| 3821 | * The additional page necessary for this is only |
| 3822 | * allocated on demand. |
| 3823 | */ |
| 3824 | |
| 3825 | u64 *lm_root; |
| 3826 | |
Ben Gardon | 254272c | 2019-02-11 11:02:50 -0800 | [diff] [blame] | 3827 | lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT); |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3828 | if (lm_root == NULL) |
| 3829 | return 1; |
| 3830 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3831 | lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3832 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3833 | vcpu->arch.mmu->lm_root = lm_root; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3834 | } |
| 3835 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3836 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3837 | } |
| 3838 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 3839 | set_root_pgd: |
| 3840 | vcpu->arch.mmu->root_pgd = root_pgd; |
Vitaly Kuznetsov | ad7dc69 | 2019-02-22 17:45:01 +0100 | [diff] [blame] | 3841 | |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3842 | return 0; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3843 | } |
| 3844 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3845 | static int mmu_alloc_roots(struct kvm_vcpu *vcpu) |
| 3846 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3847 | if (vcpu->arch.mmu->direct_map) |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 3848 | return mmu_alloc_direct_roots(vcpu); |
| 3849 | else |
| 3850 | return mmu_alloc_shadow_roots(vcpu); |
| 3851 | } |
| 3852 | |
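 | | /*
 | |  * Synchronize unsync'd child shadow pages of the current root(s) with the
 | |  * guest page tables.  Nothing to do for direct MMUs, or when a single
 | |  * 64-bit root has no unsync state pending.
 | |  */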
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3853 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3854 | { |
| 3855 | int i; |
| 3856 | struct kvm_mmu_page *sp; |
| 3857 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3858 | if (vcpu->arch.mmu->direct_map) |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3859 | return; |
| 3860 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3861 | if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3862 | return; |
Xiao Guangrong | 6903074 | 2010-09-27 18:09:29 +0800 | [diff] [blame] | 3863 | |
David Matlack | 56f17dd | 2014-08-18 15:46:07 -0700 | [diff] [blame] | 3864 | vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3865 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3866 | if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { |
| 3867 | hpa_t root = vcpu->arch.mmu->root_hpa; |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 3868 | sp = to_shadow_page(root); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3869 | |
| 3870 | /* |
| 3871 | * Even if another CPU was marking the SP as unsync-ed |
| 3872 | * simultaneously, any guest page table changes are not |
| 3873 | * guaranteed to be visible anyway until this VCPU issues a TLB |
| 3874 | * flush strictly after those changes are made. We only need to |
| 3875 | * ensure that the other CPU sets these flags before any actual |
| 3876 | * changes to the page tables are made. The comments in |
| 3877 | * mmu_need_write_protect() describe what could go wrong if this |
| 3878 | * requirement isn't satisfied. |
| 3879 | */ |
| 3880 | if (!smp_load_acquire(&sp->unsync) && |
| 3881 | !smp_load_acquire(&sp->unsync_children)) |
| 3882 | return; |
| 3883 | |
| 3884 | spin_lock(&vcpu->kvm->mmu_lock); |
| 3885 | kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); |
| 3886 | |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3887 | mmu_sync_children(vcpu, sp); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3888 | |
Xiao Guangrong | 0375f7f | 2011-11-28 20:41:00 +0800 | [diff] [blame] | 3889 | kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3890 | spin_unlock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3891 | return; |
| 3892 | } |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3893 | |
| 3894 | spin_lock(&vcpu->kvm->mmu_lock); |
| 3895 | kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); |
| 3896 | |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3897 | for (i = 0; i < 4; ++i) { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 3898 | hpa_t root = vcpu->arch.mmu->pae_root[i]; |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3899 | |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3900 | if (root && VALID_PAGE(root)) { |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3901 | root &= PT64_BASE_ADDR_MASK; |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 3902 | sp = to_shadow_page(root); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3903 | mmu_sync_children(vcpu, sp); |
| 3904 | } |
| 3905 | } |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3906 | |
Junaid Shahid | 578e1c4 | 2018-06-27 14:59:05 -0700 | [diff] [blame] | 3907 | kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3908 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 3909 | } |
Nadav Har'El | bfd0a56 | 2013-08-05 11:07:17 +0300 | [diff] [blame] | 3910 | EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3911 | |
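 | | /* With paging disabled in the guest, a "virtual" address is already a GPA. */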
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3912 | static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr, |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3913 | u32 access, struct x86_exception *exception) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3914 | { |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3915 | if (exception) |
| 3916 | exception->error_code = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3917 | return vaddr; |
| 3918 | } |
| 3919 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 3920 | static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr, |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3921 | u32 access, |
| 3922 | struct x86_exception *exception) |
Joerg Roedel | 6539e73 | 2010-09-10 17:30:50 +0200 | [diff] [blame] | 3923 | { |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 3924 | if (exception) |
| 3925 | exception->error_code = 0; |
Paolo Bonzini | 54987b7 | 2014-09-02 13:23:06 +0200 | [diff] [blame] | 3926 | return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); |
Joerg Roedel | 6539e73 | 2010-09-10 17:30:50 +0200 | [diff] [blame] | 3927 | } |
| 3928 | |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3929 | static bool |
| 3930 | __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) |
| 3931 | { |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3932 | int bit7 = (pte >> 7) & 1; |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3933 | |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3934 | return pte & rsvd_check->rsvd_bits_mask[bit7][level-1]; |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3935 | } |
| 3936 | |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3937 | static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte) |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3938 | { |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3939 | return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f); |
Xiao Guangrong | d625b15 | 2015-08-05 12:04:25 +0800 | [diff] [blame] | 3940 | } |
| 3941 | |
Takuya Yoshikawa | ded5874 | 2016-02-22 17:23:40 +0900 | [diff] [blame] | 3942 | static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3943 | { |
Paolo Bonzini | 9034e6e | 2017-08-17 18:36:58 +0200 | [diff] [blame] | 3944 | /* |
| 3945 | * A nested guest cannot use the MMIO cache if it is using nested |
| 3946 | * page tables, because cr2 is a nGPA while the cache stores GPAs. |
| 3947 | */ |
| 3948 | if (mmu_is_nested(vcpu)) |
| 3949 | return false; |
| 3950 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3951 | if (direct) |
| 3952 | return vcpu_match_mmio_gpa(vcpu, addr); |
| 3953 | |
| 3954 | return vcpu_match_mmio_gva(vcpu, addr); |
| 3955 | } |
| 3956 | |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3957 | /* return true if reserved bit is detected on spte. */ |
| 3958 | static bool |
| 3959 | walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3960 | { |
| 3961 | struct kvm_shadow_walk_iterator iterator; |
Yu Zhang | 2a7266a | 2017-08-24 20:27:54 +0800 | [diff] [blame] | 3962 | u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull; |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3963 | struct rsvd_bits_validate *rsvd_check; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3964 | int root, leaf; |
| 3965 | bool reserved = false; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3966 | |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3967 | rsvd_check = &vcpu->arch.mmu->shadow_zero_check; |
Marcelo Tosatti | 37f6a4e | 2014-01-03 17:09:32 -0200 | [diff] [blame] | 3968 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3969 | walk_shadow_page_lockless_begin(vcpu); |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3970 | |
Paolo Bonzini | 29ecd66 | 2015-09-06 16:24:50 +0200 | [diff] [blame] | 3971 | for (shadow_walk_init(&iterator, vcpu, addr), |
| 3972 | leaf = root = iterator.level; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3973 | shadow_walk_okay(&iterator); |
| 3974 | __shadow_walk_next(&iterator, spte)) { |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3975 | spte = mmu_spte_get_lockless(iterator.sptep); |
| 3976 | |
| 3977 | sptes[leaf - 1] = spte; |
Paolo Bonzini | 29ecd66 | 2015-09-06 16:24:50 +0200 | [diff] [blame] | 3978 | leaf--; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3979 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3980 | if (!is_shadow_present_pte(spte)) |
| 3981 | break; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3982 | |
Sean Christopherson | b5c3c1b | 2020-01-09 15:06:40 -0800 | [diff] [blame] | 3983 | /* |
| 3984 | * Use a bitwise-OR instead of a logical-OR to aggregate the |
| 3985 | * reserved bit and EPT's invalid memtype/XWR checks to avoid |
| 3986 | * adding a Jcc in the loop. |
| 3987 | */ |
| 3988 | reserved |= __is_bad_mt_xwr(rsvd_check, spte) | |
| 3989 | __is_rsvd_bits_set(rsvd_check, spte, iterator.level); |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3990 | } |
| 3991 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 3992 | walk_shadow_page_lockless_end(vcpu); |
| 3993 | |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3994 | if (reserved) { |
| 3995 | pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n", |
| 3996 | __func__, addr); |
Paolo Bonzini | 29ecd66 | 2015-09-06 16:24:50 +0200 | [diff] [blame] | 3997 | while (root > leaf) { |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 3998 | pr_err("------ spte 0x%llx level %d.\n", |
| 3999 | sptes[root - 1], root); |
| 4000 | root--; |
| 4001 | } |
| 4002 | } |
Sean Christopherson | ddce620 | 2019-12-06 15:57:27 -0800 | [diff] [blame] | 4003 | |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 4004 | *sptep = spte; |
| 4005 | return reserved; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4006 | } |
| 4007 | |
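 | | /*
 | |  * Handle a fault that may have hit an MMIO SPTE: returns RET_PF_EMULATE to
 | |  * emulate the MMIO access, RET_PF_INVALID if the spte's generation is
 | |  * stale, RET_PF_RETRY if the spte has since been zapped, or -EINVAL if
 | |  * unexpected reserved bits were found during the walk.
 | |  */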
Paolo Bonzini | e08d26f | 2017-08-17 18:36:56 +0200 | [diff] [blame] | 4008 | static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4009 | { |
| 4010 | u64 spte; |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 4011 | bool reserved; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4012 | |
Takuya Yoshikawa | ded5874 | 2016-02-22 17:23:40 +0900 | [diff] [blame] | 4013 | if (mmio_info_in_cache(vcpu, addr, direct)) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 4014 | return RET_PF_EMULATE; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4015 | |
Xiao Guangrong | 47ab875 | 2015-08-05 12:04:26 +0800 | [diff] [blame] | 4016 | reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); |
Paolo Bonzini | 450869d | 2015-11-04 13:41:21 +0100 | [diff] [blame] | 4017 | if (WARN_ON(reserved)) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 4018 | return -EINVAL; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4019 | |
| 4020 | if (is_mmio_spte(spte)) { |
| 4021 | gfn_t gfn = get_mmio_spte_gfn(spte); |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 4022 | unsigned int access = get_mmio_spte_access(spte); |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4023 | |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 4024 | if (!check_mmio_spte(vcpu, spte)) |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 4025 | return RET_PF_INVALID; |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 4026 | |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4027 | if (direct) |
| 4028 | addr = 0; |
Xiao Guangrong | 4f02264 | 2011-07-12 03:34:24 +0800 | [diff] [blame] | 4029 | |
| 4030 | trace_handle_mmio_page_fault(addr, gfn, access); |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4031 | vcpu_cache_mmio_info(vcpu, addr, gfn, access); |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 4032 | return RET_PF_EMULATE; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4033 | } |
| 4034 | |
| 4035 | /* |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4036 | 	 * If the page table is zapped by other CPUs, let the CPU fault again on
| 4037 | * the address. |
| 4038 | */ |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 4039 | return RET_PF_RETRY; |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4040 | } |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4041 | |
Xiao Guangrong | 3d0c27a | 2016-02-24 17:51:11 +0800 | [diff] [blame] | 4042 | static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, |
| 4043 | u32 error_code, gfn_t gfn) |
| 4044 | { |
| 4045 | if (unlikely(error_code & PFERR_RSVD_MASK)) |
| 4046 | return false; |
| 4047 | |
| 4048 | if (!(error_code & PFERR_PRESENT_MASK) || |
| 4049 | !(error_code & PFERR_WRITE_MASK)) |
| 4050 | return false; |
| 4051 | |
| 4052 | /* |
 | 4053 | 	 * The guest is writing a page that is write-tracked, which cannot
 | 4054 | 	 * be fixed by the page fault handler.
| 4055 | */ |
| 4056 | if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) |
| 4057 | return true; |
| 4058 | |
| 4059 | return false; |
| 4060 | } |
| 4061 | |
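 | | /*
 | |  * Reset the write-flooding count of every shadow page visited while
 | |  * walking @addr.
 | |  */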
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 4062 | static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) |
| 4063 | { |
| 4064 | struct kvm_shadow_walk_iterator iterator; |
| 4065 | u64 spte; |
| 4066 | |
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 4067 | walk_shadow_page_lockless_begin(vcpu); |
| 4068 | for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { |
| 4069 | clear_sp_write_flooding_count(iterator.sptep); |
| 4070 | if (!is_shadow_present_pte(spte)) |
| 4071 | break; |
| 4072 | } |
| 4073 | walk_shadow_page_lockless_end(vcpu); |
| 4074 | } |
| 4075 | |
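 | | /*
 | |  * Queue an asynchronous page fault for @gfn so the vCPU can keep running
 | |  * while the host faults the page in.
 | |  */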
Vitaly Kuznetsov | e8c2226 | 2020-06-15 14:13:34 +0200 | [diff] [blame] | 4076 | static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, |
| 4077 | gfn_t gfn) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4078 | { |
| 4079 | struct kvm_arch_async_pf arch; |
Xiao Guangrong | fb67e14 | 2010-12-07 10:35:25 +0800 | [diff] [blame] | 4080 | |
Gleb Natapov | 7c90705 | 2010-10-14 11:22:53 +0200 | [diff] [blame] | 4081 | arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4082 | arch.gfn = gfn; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 4083 | arch.direct_map = vcpu->arch.mmu->direct_map; |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 4084 | arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4085 | |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 4086 | return kvm_setup_async_pf(vcpu, cr2_or_gpa, |
| 4087 | kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4088 | } |
| 4089 | |
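 | | /*
 | |  * Resolve @gfn to a host pfn.  Returns true if the fault should simply be
 | |  * retried later, either because an async page fault was queued for the gfn
 | |  * or because the vCPU must halt waiting for an already-pending async fault
 | |  * on the same gfn.
 | |  */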
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 4090 | static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 4091 | gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write, |
| 4092 | bool *writable) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4093 | { |
Paolo Bonzini | c36b715 | 2020-04-16 09:48:07 -0400 | [diff] [blame] | 4094 | struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4095 | bool async; |
| 4096 | |
Paolo Bonzini | c36b715 | 2020-04-16 09:48:07 -0400 | [diff] [blame] | 4097 | /* Don't expose private memslots to L2. */ |
| 4098 | if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) { |
Jim Mattson | 3a2936d | 2018-05-09 17:02:05 -0400 | [diff] [blame] | 4099 | *pfn = KVM_PFN_NOSLOT; |
Sean Christopherson | c583eed | 2020-04-15 14:44:13 -0700 | [diff] [blame] | 4100 | *writable = false; |
Jim Mattson | 3a2936d | 2018-05-09 17:02:05 -0400 | [diff] [blame] | 4101 | return false; |
| 4102 | } |
| 4103 | |
Paolo Bonzini | 3520469 | 2015-04-02 11:20:48 +0200 | [diff] [blame] | 4104 | async = false; |
| 4105 | *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4106 | if (!async) |
| 4107 | return false; /* *pfn has correct page already */ |
| 4108 | |
Wanpeng Li | 9bc1f09 | 2017-06-08 20:13:40 -0700 | [diff] [blame] | 4109 | if (!prefault && kvm_can_do_async_pf(vcpu)) { |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 4110 | trace_kvm_try_async_get_page(cr2_or_gpa, gfn); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4111 | if (kvm_find_async_pf_gfn(vcpu, gfn)) { |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 4112 | trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4113 | kvm_make_request(KVM_REQ_APF_HALT, vcpu); |
| 4114 | return true; |
Sean Christopherson | 9f1a852 | 2019-12-06 15:57:17 -0800 | [diff] [blame] | 4115 | } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn)) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4116 | return true; |
| 4117 | } |
| 4118 | |
Paolo Bonzini | 3520469 | 2015-04-02 11:20:48 +0200 | [diff] [blame] | 4119 | *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 4120 | return false; |
| 4121 | } |
| 4122 | |
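 | | /*
 | |  * Common fault handler for direct MMUs (nonpaging and TDP): try the
 | |  * lockless fast path first, then resolve the pfn (possibly asynchronously)
 | |  * and map it under mmu_lock via __direct_map().
 | |  */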
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4123 | static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, |
| 4124 | bool prefault, int max_level, bool is_tdp) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4125 | { |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 4126 | bool write = error_code & PFERR_WRITE_MASK; |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4127 | bool map_writable; |
Avi Kivity | ebeace8 | 2007-01-05 16:36:47 -0800 | [diff] [blame] | 4128 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4129 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 4130 | unsigned long mmu_seq; |
| 4131 | kvm_pfn_t pfn; |
Sean Christopherson | 83f06fa | 2020-01-08 12:24:43 -0800 | [diff] [blame] | 4132 | int r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4133 | |
| 4134 | if (page_fault_handle_page_track(vcpu, error_code, gfn)) |
| 4135 | return RET_PF_EMULATE; |
| 4136 | |
Sean Christopherson | c4371c2 | 2020-09-23 15:04:24 -0700 | [diff] [blame] | 4137 | r = fast_page_fault(vcpu, gpa, error_code); |
| 4138 | if (r != RET_PF_INVALID) |
| 4139 | return r; |
Sean Christopherson | 8329144 | 2020-07-02 19:35:30 -0700 | [diff] [blame] | 4140 | |
Sean Christopherson | 378f5cd | 2020-07-02 19:35:36 -0700 | [diff] [blame] | 4141 | r = mmu_topup_memory_caches(vcpu, false); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4142 | if (r) |
| 4143 | return r; |
| 4144 | |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 4145 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
| 4146 | smp_rmb(); |
| 4147 | |
| 4148 | if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) |
| 4149 | return RET_PF_RETRY; |
| 4150 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4151 | if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r)) |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 4152 | return r; |
| 4153 | |
| 4154 | r = RET_PF_RETRY; |
| 4155 | spin_lock(&vcpu->kvm->mmu_lock); |
| 4156 | if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) |
| 4157 | goto out_unlock; |
Sean Christopherson | 7bd7ded | 2020-06-23 12:35:42 -0700 | [diff] [blame] | 4158 | r = make_mmu_pages_available(vcpu); |
| 4159 | if (r) |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 4160 | goto out_unlock; |
Sean Christopherson | 6c2fd34 | 2020-09-23 11:37:30 -0700 | [diff] [blame] | 4161 | r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn, |
| 4162 | prefault, is_tdp); |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4163 | |
Sean Christopherson | 367fd79 | 2019-12-06 15:57:16 -0800 | [diff] [blame] | 4164 | out_unlock: |
| 4165 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 4166 | kvm_release_pfn_clean(pfn); |
| 4167 | return r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4168 | } |
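The mmu_seq snapshot plus the mmu_notifier_retry() check above is the usual lockless-fault pattern: resolve the pfn without mmu_lock held, then re-validate the notifier generation under the lock before installing anything. A minimal sketch of that pattern follows; the fake_* names are placeholders, not kernel APIs.

#include <errno.h>

struct fake_kvm {
        unsigned long mmu_notifier_seq;  /* bumped by host invalidation callbacks */
};

/* Placeholder helpers, not kernel APIs. */
static void fake_resolve_pfn(void)   { /* gfn -> pfn lookup, may sleep */ }
static void fake_install_spte(void)  { /* write the shadow/TDP PTE */ }

static int fake_map_gfn(struct fake_kvm *kvm)
{
        /* Snapshot the generation before the unlocked pfn lookup. */
        unsigned long seq = kvm->mmu_notifier_seq;

        fake_resolve_pfn();              /* runs without mmu_lock held */

        /* In the real code this comparison happens under mmu_lock. */
        if (seq != kvm->mmu_notifier_seq)
                return -EAGAIN;          /* an invalidation raced with us: retry the fault */

        fake_install_spte();             /* safe: no invalidation since the snapshot */
        return 0;
}

int main(void)
{
        struct fake_kvm kvm = { .mmu_notifier_seq = 0 };

        return fake_map_gfn(&kvm);       /* 0: no invalidation raced in this toy run */
}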
| 4169 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4170 | static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, |
| 4171 | u32 error_code, bool prefault) |
| 4172 | { |
| 4173 | pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code); |
| 4174 | |
| 4175 | /* This path builds a PAE pagetable, so 2MB pages are the largest we can map. */
| 4176 | return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault, |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 4177 | PG_LEVEL_2M, false); |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4178 | } |
| 4179 | |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4180 | int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 4181 | u64 fault_address, char *insn, int insn_len) |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4182 | { |
| 4183 | int r = 1; |
Vitaly Kuznetsov | 9ce372b | 2020-05-07 16:36:02 +0200 | [diff] [blame] | 4184 | u32 flags = vcpu->arch.apf.host_apf_flags; |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4185 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 4186 | #ifndef CONFIG_X86_64 |
| 4187 | /* A 64-bit CR2 should be impossible on 32-bit KVM. */ |
| 4188 | if (WARN_ON_ONCE(fault_address >> 32)) |
| 4189 | return -EFAULT; |
| 4190 | #endif |
| 4191 | |
Paolo Bonzini | c595cee | 2018-07-02 13:07:14 +0200 | [diff] [blame] | 4192 | vcpu->arch.l1tf_flush_l1d = true; |
Vitaly Kuznetsov | 9ce372b | 2020-05-07 16:36:02 +0200 | [diff] [blame] | 4193 | if (!flags) { |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4194 | trace_kvm_page_fault(fault_address, error_code); |
| 4195 | |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 4196 | if (kvm_event_needs_reinjection(vcpu)) |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4197 | kvm_mmu_unprotect_page_virt(vcpu, fault_address); |
| 4198 | r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn, |
| 4199 | insn_len); |
Vitaly Kuznetsov | 9ce372b | 2020-05-07 16:36:02 +0200 | [diff] [blame] | 4200 | } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) { |
Vitaly Kuznetsov | 68fd66f | 2020-05-25 16:41:17 +0200 | [diff] [blame] | 4201 | vcpu->arch.apf.host_apf_flags = 0; |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4202 | local_irq_disable(); |
Thomas Gleixner | 6bca69a | 2020-03-07 00:42:06 +0100 | [diff] [blame] | 4203 | kvm_async_pf_task_wait_schedule(fault_address); |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4204 | local_irq_enable(); |
Vitaly Kuznetsov | 9ce372b | 2020-05-07 16:36:02 +0200 | [diff] [blame] | 4205 | } else { |
| 4206 | WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags); |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4207 | } |
Vitaly Kuznetsov | 9ce372b | 2020-05-07 16:36:02 +0200 | [diff] [blame] | 4208 | |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 4209 | return r; |
| 4210 | } |
| 4211 | EXPORT_SYMBOL_GPL(kvm_handle_page_fault); |
| 4212 | |
Sean Christopherson | 7a02674 | 2020-02-06 14:14:34 -0800 | [diff] [blame] | 4213 | int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, |
| 4214 | bool prefault) |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4215 | { |
Sean Christopherson | cb9b88c | 2019-12-06 15:57:18 -0800 | [diff] [blame] | 4216 | int max_level; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4217 | |
Sean Christopherson | e662ec3 | 2020-04-27 17:54:21 -0700 | [diff] [blame] | 4218 | for (max_level = KVM_MAX_HUGEPAGE_LEVEL; |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 4219 | max_level > PG_LEVEL_4K; |
Sean Christopherson | cb9b88c | 2019-12-06 15:57:18 -0800 | [diff] [blame] | 4220 | max_level--) { |
| 4221 | int page_num = KVM_PAGES_PER_HPAGE(max_level); |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4222 | gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4223 | |
Sean Christopherson | cb9b88c | 2019-12-06 15:57:18 -0800 | [diff] [blame] | 4224 | if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num)) |
| 4225 | break; |
Takuya Yoshikawa | fd13690 | 2015-10-16 17:06:02 +0900 | [diff] [blame] | 4226 | } |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 4227 | |
Sean Christopherson | 0f90e1c | 2019-12-06 15:57:24 -0800 | [diff] [blame] | 4228 | return direct_page_fault(vcpu, gpa, error_code, prefault, |
| 4229 | max_level, true); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4230 | } |
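The max_level loop above needs only simple mask arithmetic: the small-page count returned by KVM_PAGES_PER_HPAGE() is a power of two, so rounding the faulting gfn down to the start of the would-be huge mapping is a single AND. A standalone illustration, assuming 4KiB pages and a 2MiB level for the numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t gpa = 0x12345678ULL;            /* arbitrary guest physical address */
        uint64_t gfn = gpa >> 12;                /* 4KiB frame number: 0x12345 */
        uint64_t page_num = 512;                 /* 4KiB pages per 2MiB mapping */
        uint64_t base = gfn & ~(page_num - 1);   /* first gfn of the 2MiB range */

        printf("gfn=%#llx base=%#llx\n",
               (unsigned long long)gfn, (unsigned long long)base);
        /* prints gfn=0x12345 base=0x12200 */
        return 0;
}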
| 4231 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4232 | static void nonpaging_init_context(struct kvm_vcpu *vcpu, |
| 4233 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4234 | { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4235 | context->page_fault = nonpaging_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4236 | context->gva_to_gpa = nonpaging_gva_to_gpa; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 4237 | context->sync_page = nonpaging_sync_page; |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 4238 | context->invlpg = NULL; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 4239 | context->update_pte = nonpaging_update_pte; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 4240 | context->root_level = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4241 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 4242 | context->direct_map = true; |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4243 | context->nx = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4244 | } |
| 4245 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4246 | static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd, |
Sean Christopherson | 0be4435 | 2020-02-28 14:52:40 -0800 | [diff] [blame] | 4247 | union kvm_mmu_page_role role) |
| 4248 | { |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4249 | return (role.direct || pgd == root->pgd) && |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 4250 | VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) && |
| 4251 | role.word == to_shadow_page(root->hpa)->role.word; |
Sean Christopherson | 0be4435 | 2020-02-28 14:52:40 -0800 | [diff] [blame] | 4252 | } |
| 4253 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4254 | /* |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4255 | * Find out if a previously cached root matching the new pgd/role is available. |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4256 | * The current root is also inserted into the cache. |
| 4257 | * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is |
| 4258 | * returned. |
| 4259 | * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and |
| 4260 | * false is returned. This root should now be freed by the caller. |
| 4261 | */ |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4262 | static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd, |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4263 | union kvm_mmu_page_role new_role) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4264 | { |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4265 | uint i; |
| 4266 | struct kvm_mmu_root_info root; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 4267 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4268 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4269 | root.pgd = mmu->root_pgd; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4270 | root.hpa = mmu->root_hpa; |
| 4271 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4272 | if (is_root_usable(&root, new_pgd, new_role)) |
Sean Christopherson | 0be4435 | 2020-02-28 14:52:40 -0800 | [diff] [blame] | 4273 | return true; |
| 4274 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4275 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { |
| 4276 | swap(root, mmu->prev_roots[i]); |
| 4277 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4278 | if (is_root_usable(&root, new_pgd, new_role)) |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4279 | break; |
| 4280 | } |
| 4281 | |
| 4282 | mmu->root_hpa = root.hpa; |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4283 | mmu->root_pgd = root.pgd; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 4284 | |
| 4285 | return i < KVM_MMU_NUM_PREV_ROOTS; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4286 | } |
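The swap() walk above is what makes the comment's "current root is also inserted into the cache" true: every miss pushes the previously examined root one slot deeper, so on a hit the old current root ends up cached and the array stays roughly MRU-ordered. A toy demonstration with plain integers standing in for the hpa/pgd pairs; promote() is an invented name for this sketch.

#include <stdio.h>

#define NR_PREV 3

static int promote(int *cur, int prev[NR_PREV], int want)
{
        int i, tmp, root = *cur;

        if (root == want)
                return 1;

        for (i = 0; i < NR_PREV; i++) {
                tmp = root; root = prev[i]; prev[i] = tmp;   /* swap() */
                if (root == want)
                        break;
        }
        *cur = root;            /* either the match or the evicted LRU entry */
        return i < NR_PREV;
}

int main(void)
{
        int cur = 10, prev[NR_PREV] = { 20, 30, 40 };
        int hit = promote(&cur, prev, 30);

        printf("hit=%d cur=%d prev={%d,%d,%d}\n",
               hit, cur, prev[0], prev[1], prev[2]);
        /* prints hit=1 cur=30 prev={10,20,40}: the old current root (10) stays cached */
        return 0;
}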
| 4287 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4288 | static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd, |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame] | 4289 | union kvm_mmu_page_role new_role) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4290 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 4291 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 4292 | |
| 4293 | /* |
| 4294 | * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid |
| 4295 | * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs |
| 4296 | * later if necessary. |
| 4297 | */ |
| 4298 | if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame] | 4299 | mmu->root_level >= PT64_ROOT_4LEVEL) |
Vitaly Kuznetsov | fe9304d | 2020-07-10 16:11:57 +0200 | [diff] [blame] | 4300 | return cached_root_available(vcpu, new_pgd, new_role); |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 4301 | |
| 4302 | return false; |
| 4303 | } |
| 4304 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4305 | static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, |
Junaid Shahid | ade61e2 | 2018-06-27 14:59:15 -0700 | [diff] [blame] | 4306 | union kvm_mmu_page_role new_role, |
Sean Christopherson | 4a632ac | 2020-03-20 14:28:27 -0700 | [diff] [blame] | 4307 | bool skip_tlb_flush, bool skip_mmu_sync) |
Junaid Shahid | 0aab33e | 2018-06-27 14:59:09 -0700 | [diff] [blame] | 4308 | { |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4309 | if (!fast_pgd_switch(vcpu, new_pgd, new_role)) { |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame] | 4310 | kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT); |
| 4311 | return; |
| 4312 | } |
| 4313 | |
| 4314 | /* |
| 4315 | * It's possible that the cached previous root page is obsolete because |
| 4316 | * of a change in the MMU generation number. However, changing the |
| 4317 | * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will |
| 4318 | * free the root set here and allocate a new one. |
| 4319 | */ |
| 4320 | kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); |
| 4321 | |
Sean Christopherson | 71fe701 | 2020-03-20 14:28:28 -0700 | [diff] [blame] | 4322 | if (!skip_mmu_sync || force_flush_and_sync_on_reuse) |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame] | 4323 | kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); |
Sean Christopherson | 71fe701 | 2020-03-20 14:28:28 -0700 | [diff] [blame] | 4324 | if (!skip_tlb_flush || force_flush_and_sync_on_reuse) |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame] | 4325 | kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); |
Sean Christopherson | b869855 | 2020-03-20 14:28:26 -0700 | [diff] [blame] | 4326 | |
| 4327 | /* |
| 4328 | * The last MMIO access's GVA and GPA are cached in the VCPU. When |
| 4329 | * switching to a new CR3, that GVA->GPA mapping may no longer be |
| 4330 | * valid. So clear any cached MMIO info even when we don't need to sync |
| 4331 | * the shadow page tables. |
| 4332 | */ |
| 4333 | vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); |
| 4334 | |
Sean Christopherson | e47c4ae | 2020-06-22 13:20:34 -0700 | [diff] [blame] | 4335 | __clear_sp_write_flooding_count(to_shadow_page(vcpu->arch.mmu->root_hpa)); |
Junaid Shahid | 0aab33e | 2018-06-27 14:59:09 -0700 | [diff] [blame] | 4336 | } |
| 4337 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4338 | void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush, |
Sean Christopherson | 4a632ac | 2020-03-20 14:28:27 -0700 | [diff] [blame] | 4339 | bool skip_mmu_sync) |
Junaid Shahid | 7c390d3 | 2018-06-27 14:59:06 -0700 | [diff] [blame] | 4340 | { |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4341 | __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu), |
Sean Christopherson | 4a632ac | 2020-03-20 14:28:27 -0700 | [diff] [blame] | 4342 | skip_tlb_flush, skip_mmu_sync); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4343 | } |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 4344 | EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4345 | |
Joerg Roedel | 5777ed3 | 2010-09-10 17:30:42 +0200 | [diff] [blame] | 4346 | static unsigned long get_cr3(struct kvm_vcpu *vcpu) |
| 4347 | { |
Avi Kivity | 9f8fe50 | 2010-12-05 17:30:00 +0200 | [diff] [blame] | 4348 | return kvm_read_cr3(vcpu); |
Joerg Roedel | 5777ed3 | 2010-09-10 17:30:42 +0200 | [diff] [blame] | 4349 | } |
| 4350 | |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 4351 | static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, |
Ben Gardon | 0a2b64c | 2020-02-03 15:09:09 -0800 | [diff] [blame] | 4352 | unsigned int access, int *nr_present) |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4353 | { |
| 4354 | if (unlikely(is_mmio_spte(*sptep))) { |
| 4355 | if (gfn != get_mmio_spte_gfn(*sptep)) { |
| 4356 | mmu_spte_clear_no_track(sptep); |
| 4357 | return true; |
| 4358 | } |
| 4359 | |
| 4360 | (*nr_present)++; |
Paolo Bonzini | 54bf36a | 2015-04-08 15:39:23 +0200 | [diff] [blame] | 4361 | mark_mmio_spte(vcpu, sptep, gfn, access); |
Xiao Guangrong | ce88dec | 2011-07-12 03:33:44 +0800 | [diff] [blame] | 4362 | return true; |
| 4363 | } |
| 4364 | |
| 4365 | return false; |
| 4366 | } |
| 4367 | |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4368 | static inline bool is_last_gpte(struct kvm_mmu *mmu, |
| 4369 | unsigned level, unsigned gpte) |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4370 | { |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4371 | /* |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4372 | * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. |
| 4373 | * If it is clear, there are no large pages at this level, so clear |
| 4374 | * PT_PAGE_SIZE_MASK in gpte if that is the case. |
| 4375 | */ |
| 4376 | gpte &= level - mmu->last_nonleaf_level; |
| 4377 | |
Ladi Prosek | 829ee27 | 2017-10-05 11:10:23 +0200 | [diff] [blame] | 4378 | /* |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 4379 | * PG_LEVEL_4K always terminates. The RHS has bit 7 set |
| 4380 | * iff level <= PG_LEVEL_4K, which for our purpose means |
| 4381 | * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then. |
Ladi Prosek | 829ee27 | 2017-10-05 11:10:23 +0200 | [diff] [blame] | 4382 | */ |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 4383 | gpte |= level - PG_LEVEL_4K - 1; |
Ladi Prosek | 829ee27 | 2017-10-05 11:10:23 +0200 | [diff] [blame] | 4384 | |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4385 | return gpte & PT_PAGE_SIZE_MASK; |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4386 | } |
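Both statements in is_last_gpte() rely on unsigned wrap-around to conjure bit 7 (the PS bit position): a "negative" difference leaves it set, a small non-negative one clears it. A compilable re-derivation of the trick with the constants spelled out, assuming PG_LEVEL_4K == 1 as in the kernel:

#include <stdio.h>

#define PS_BIT (1u << 7)        /* PT_PAGE_SIZE_MASK: the large-page bit */

static unsigned is_last(unsigned level, unsigned last_nonleaf_level, unsigned gpte)
{
        gpte &= level - last_nonleaf_level;  /* wraps "negative" -> bit 7 survives */
        gpte |= level - 1 - 1;               /* 1 == PG_LEVEL_4K; level 1 wraps, setting bit 7 */
        return gpte & PS_BIT;
}

int main(void)
{
        printf("%d\n", !!is_last(1, 4, 0));        /* 1: 4K entries always terminate */
        printf("%d\n", !!is_last(2, 4, PS_BIT));   /* 1: 2M entry with PS set */
        printf("%d\n", !!is_last(2, 2, PS_BIT));   /* 0: no large pages at this level */
        return 0;
}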
| 4387 | |
Nadav Har'El | 37406aa | 2013-08-05 11:07:12 +0300 | [diff] [blame] | 4388 | #define PTTYPE_EPT 18 /* arbitrary */ |
| 4389 | #define PTTYPE PTTYPE_EPT |
| 4390 | #include "paging_tmpl.h" |
| 4391 | #undef PTTYPE |
| 4392 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4393 | #define PTTYPE 64 |
| 4394 | #include "paging_tmpl.h" |
| 4395 | #undef PTTYPE |
| 4396 | |
| 4397 | #define PTTYPE 32 |
| 4398 | #include "paging_tmpl.h" |
| 4399 | #undef PTTYPE |
| 4400 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4401 | static void |
| 4402 | __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, |
| 4403 | struct rsvd_bits_validate *rsvd_check, |
| 4404 | int maxphyaddr, int level, bool nx, bool gbpages, |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4405 | bool pse, bool amd) |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4406 | { |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4407 | u64 exb_bit_rsvd = 0; |
Nadav Amit | 5f7dde7 | 2014-05-07 15:32:50 +0300 | [diff] [blame] | 4408 | u64 gbpages_bit_rsvd = 0; |
Paolo Bonzini | a0c0feb | 2014-09-02 13:24:12 +0200 | [diff] [blame] | 4409 | u64 nonleaf_bit8_rsvd = 0; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4410 | |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4411 | rsvd_check->bad_mt_xwr = 0; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4412 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4413 | if (!nx) |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4414 | exb_bit_rsvd = rsvd_bits(63, 63); |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4415 | if (!gbpages) |
Nadav Amit | 5f7dde7 | 2014-05-07 15:32:50 +0300 | [diff] [blame] | 4416 | gbpages_bit_rsvd = rsvd_bits(7, 7); |
Paolo Bonzini | a0c0feb | 2014-09-02 13:24:12 +0200 | [diff] [blame] | 4417 | |
| 4418 | /* |
| 4419 | * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for |
| 4420 | * leaf entries) on AMD CPUs only. |
| 4421 | */ |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4422 | if (amd) |
Paolo Bonzini | a0c0feb | 2014-09-02 13:24:12 +0200 | [diff] [blame] | 4423 | nonleaf_bit8_rsvd = rsvd_bits(8, 8); |
| 4424 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4425 | switch (level) { |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4426 | case PT32_ROOT_LEVEL: |
| 4427 | /* no rsvd bits for 2 level 4K page table entries */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4428 | rsvd_check->rsvd_bits_mask[0][1] = 0; |
| 4429 | rsvd_check->rsvd_bits_mask[0][0] = 0; |
| 4430 | rsvd_check->rsvd_bits_mask[1][0] = |
| 4431 | rsvd_check->rsvd_bits_mask[0][0]; |
Xiao Guangrong | f815bce | 2010-03-19 17:58:53 +0800 | [diff] [blame] | 4432 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4433 | if (!pse) { |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4434 | rsvd_check->rsvd_bits_mask[1][1] = 0; |
Xiao Guangrong | f815bce | 2010-03-19 17:58:53 +0800 | [diff] [blame] | 4435 | break; |
| 4436 | } |
| 4437 | |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4438 | if (is_cpuid_PSE36()) |
| 4439 | /* 36bits PSE 4MB page */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4440 | rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4441 | else |
| 4442 | /* 32 bits PSE 4MB page */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4443 | rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4444 | break; |
| 4445 | case PT32E_ROOT_LEVEL: |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4446 | rsvd_check->rsvd_bits_mask[0][2] = |
Dong, Eddie | 20c466b | 2009-03-31 23:03:45 +0800 | [diff] [blame] | 4447 | rsvd_bits(maxphyaddr, 63) | |
Nadav Amit | cd9ae5f | 2014-04-04 06:31:04 +0300 | [diff] [blame] | 4448 | rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4449 | rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | |
Sheng Yang | 4c26b4c | 2009-04-02 10:28:37 +0800 | [diff] [blame] | 4450 | rsvd_bits(maxphyaddr, 62); /* PDE */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4451 | rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4452 | rsvd_bits(maxphyaddr, 62); /* PTE */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4453 | rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4454 | rsvd_bits(maxphyaddr, 62) | |
| 4455 | rsvd_bits(13, 20); /* large page */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4456 | rsvd_check->rsvd_bits_mask[1][0] = |
| 4457 | rsvd_check->rsvd_bits_mask[0][0]; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4458 | break; |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4459 | case PT64_ROOT_5LEVEL: |
| 4460 | rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd | |
| 4461 | nonleaf_bit8_rsvd | rsvd_bits(7, 7) | |
| 4462 | rsvd_bits(maxphyaddr, 51); |
| 4463 | rsvd_check->rsvd_bits_mask[1][4] = |
| 4464 | rsvd_check->rsvd_bits_mask[0][4]; |
Gustavo A. R. Silva | df561f66 | 2020-08-23 17:36:59 -0500 | [diff] [blame] | 4465 | fallthrough; |
Yu Zhang | 2a7266a | 2017-08-24 20:27:54 +0800 | [diff] [blame] | 4466 | case PT64_ROOT_4LEVEL: |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4467 | rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | |
| 4468 | nonleaf_bit8_rsvd | rsvd_bits(7, 7) | |
Sheng Yang | 4c26b4c | 2009-04-02 10:28:37 +0800 | [diff] [blame] | 4469 | rsvd_bits(maxphyaddr, 51); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4470 | rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | |
Paolo Bonzini | 5ecad24 | 2020-06-30 07:07:20 -0400 | [diff] [blame] | 4471 | gbpages_bit_rsvd | |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4472 | rsvd_bits(maxphyaddr, 51); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4473 | rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | |
| 4474 | rsvd_bits(maxphyaddr, 51); |
| 4475 | rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | |
| 4476 | rsvd_bits(maxphyaddr, 51); |
| 4477 | rsvd_check->rsvd_bits_mask[1][3] = |
| 4478 | rsvd_check->rsvd_bits_mask[0][3]; |
| 4479 | rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd | |
Nadav Amit | 5f7dde7 | 2014-05-07 15:32:50 +0300 | [diff] [blame] | 4480 | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) | |
Joerg Roedel | e04da98 | 2009-07-27 16:30:45 +0200 | [diff] [blame] | 4481 | rsvd_bits(13, 29); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4482 | rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | |
Sheng Yang | 4c26b4c | 2009-04-02 10:28:37 +0800 | [diff] [blame] | 4483 | rsvd_bits(maxphyaddr, 51) | |
| 4484 | rsvd_bits(13, 20); /* large page */ |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4485 | rsvd_check->rsvd_bits_mask[1][0] = |
| 4486 | rsvd_check->rsvd_bits_mask[0][0]; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 4487 | break; |
| 4488 | } |
| 4489 | } |
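Every entry above is assembled from rsvd_bits(s, e), which builds the inclusive mask of bits s through e (so rsvd_bits(maxphyaddr, 51), for example, marks the address bits above the CPU's physical width as reserved). An equivalent construction, using my own helper name rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Inclusive bit mask [s, e]; equivalent in effect to the kernel's rsvd_bits(). */
static uint64_t bits_span(int s, int e)
{
        return e < s ? 0 : ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)bits_span(13, 20));  /* 0x1fe000 */
        printf("%#llx\n", (unsigned long long)bits_span(36, 51));  /* 0xffff000000000 */
        return 0;
}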
| 4490 | |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4491 | static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, |
| 4492 | struct kvm_mmu *context) |
| 4493 | { |
| 4494 | __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, |
| 4495 | cpuid_maxphyaddr(vcpu), context->root_level, |
Radim Krčmář | d6321d4 | 2017-08-05 00:12:49 +0200 | [diff] [blame] | 4496 | context->nx, |
| 4497 | guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), |
Sean Christopherson | 23493d0 | 2020-03-04 17:34:33 -0800 | [diff] [blame] | 4498 | is_pse(vcpu), |
| 4499 | guest_cpuid_is_amd_or_hygon(vcpu)); |
Xiao Guangrong | 6dc98b8 | 2015-08-05 12:04:22 +0800 | [diff] [blame] | 4500 | } |
| 4501 | |
Xiao Guangrong | 81b8eeb | 2015-08-05 12:04:23 +0800 | [diff] [blame] | 4502 | static void |
| 4503 | __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, |
| 4504 | int maxphyaddr, bool execonly) |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4505 | { |
Paolo Bonzini | 951f9fd | 2015-09-23 10:34:26 +0200 | [diff] [blame] | 4506 | u64 bad_mt_xwr; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4507 | |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4508 | rsvd_check->rsvd_bits_mask[0][4] = |
| 4509 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4510 | rsvd_check->rsvd_bits_mask[0][3] = |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4511 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4512 | rsvd_check->rsvd_bits_mask[0][2] = |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4513 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4514 | rsvd_check->rsvd_bits_mask[0][1] = |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4515 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4516 | rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4517 | |
| 4518 | /* large page */ |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4519 | rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4520 | rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; |
| 4521 | rsvd_check->rsvd_bits_mask[1][2] = |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4522 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4523 | rsvd_check->rsvd_bits_mask[1][1] = |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4524 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); |
Xiao Guangrong | a0a64f5 | 2015-08-05 12:04:21 +0800 | [diff] [blame] | 4525 | rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4526 | |
Paolo Bonzini | 951f9fd | 2015-09-23 10:34:26 +0200 | [diff] [blame] | 4527 | bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ |
| 4528 | bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */ |
| 4529 | bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */ |
| 4530 | bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */ |
| 4531 | bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */ |
| 4532 | if (!execonly) { |
| 4533 | /* bits 0..2 must not be 100 unless VMX capabilities allow it */ |
| 4534 | bad_mt_xwr |= REPEAT_BYTE(1ull << 4); |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4535 | } |
Paolo Bonzini | 951f9fd | 2015-09-23 10:34:26 +0200 | [diff] [blame] | 4536 | rsvd_check->bad_mt_xwr = bad_mt_xwr; |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4537 | } |
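bad_mt_xwr is effectively a 64-entry lookup table packed into a u64: bit N is set when the low six PTE bits (memtype in 3..5, XWR in 0..2) equal to N form an illegal combination. A hedged sketch of how such a table can be consulted with a single shift-and-test per PTE; ept_low_bits_illegal is an invented name, not the kernel helper.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: test whether a PTE's low six bits hit a "bad" pattern. */
static int ept_low_bits_illegal(uint64_t bad_mt_xwr, uint64_t spte)
{
        return (bad_mt_xwr >> (spte & 0x3f)) & 1;
}

int main(void)
{
        /* Bit 2 of every byte set: XWR == 010 (write-only) is always illegal. */
        uint64_t bad = 0x0404040404040404ULL;

        printf("%d\n", ept_low_bits_illegal(bad, (6u << 3) | 2u));  /* memtype WB + write-only -> 1 */
        return 0;
}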
| 4538 | |
Xiao Guangrong | 81b8eeb | 2015-08-05 12:04:23 +0800 | [diff] [blame] | 4539 | static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, |
| 4540 | struct kvm_mmu *context, bool execonly) |
| 4541 | { |
| 4542 | __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, |
| 4543 | cpuid_maxphyaddr(vcpu), execonly); |
| 4544 | } |
| 4545 | |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4546 | /* |
| 4547 | * The page table on the host is the shadow page table for the page
| 4548 | * table in the guest or AMD nested guest; its MMU features completely
| 4549 | * follow the features in the guest.
| 4550 | */ |
| 4551 | void |
| 4552 | reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) |
| 4553 | { |
Vitaly Kuznetsov | 36d9594d | 2018-10-08 21:28:10 +0200 | [diff] [blame] | 4554 | bool uses_nx = context->nx || |
| 4555 | context->mmu_role.base.smep_andnot_wp; |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4556 | struct rsvd_bits_validate *shadow_zero_check; |
| 4557 | int i; |
Paolo Bonzini | 5f0b819 | 2016-03-09 14:28:02 +0100 | [diff] [blame] | 4558 | |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4559 | /* |
| 4560 | * Passing "true" to the last argument is okay; it adds a check |
| 4561 | * on bit 8 of the SPTEs which KVM doesn't use anyway. |
| 4562 | */ |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4563 | shadow_zero_check = &context->shadow_zero_check; |
| 4564 | __reset_rsvds_bits_mask(vcpu, shadow_zero_check, |
Kai Huang | f3ecb59 | 2019-05-03 03:08:53 -0700 | [diff] [blame] | 4565 | shadow_phys_bits, |
Paolo Bonzini | 5f0b819 | 2016-03-09 14:28:02 +0100 | [diff] [blame] | 4566 | context->shadow_root_level, uses_nx, |
Radim Krčmář | d6321d4 | 2017-08-05 00:12:49 +0200 | [diff] [blame] | 4567 | guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), |
| 4568 | is_pse(vcpu), true); |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4569 | |
| 4570 | if (!shadow_me_mask) |
| 4571 | return; |
| 4572 | |
| 4573 | for (i = context->shadow_root_level; --i >= 0;) { |
| 4574 | shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; |
| 4575 | shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; |
| 4576 | } |
| 4577 | |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4578 | } |
| 4579 | EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); |
| 4580 | |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4581 | static inline bool boot_cpu_is_amd(void) |
| 4582 | { |
| 4583 | WARN_ON_ONCE(!tdp_enabled); |
| 4584 | return shadow_x_mask == 0; |
| 4585 | } |
| 4586 | |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4587 | /* |
| 4588 | * The direct page table on the host uses as many MMU features as
| 4589 | * possible; however, KVM currently does not do execution-protection.
| 4590 | */ |
| 4591 | static void |
| 4592 | reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, |
| 4593 | struct kvm_mmu *context) |
| 4594 | { |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4595 | struct rsvd_bits_validate *shadow_zero_check; |
| 4596 | int i; |
| 4597 | |
| 4598 | shadow_zero_check = &context->shadow_zero_check; |
| 4599 | |
Paolo Bonzini | 6fec214 | 2015-09-22 23:02:14 +0200 | [diff] [blame] | 4600 | if (boot_cpu_is_amd()) |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4601 | __reset_rsvds_bits_mask(vcpu, shadow_zero_check, |
Kai Huang | f3ecb59 | 2019-05-03 03:08:53 -0700 | [diff] [blame] | 4602 | shadow_phys_bits, |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4603 | context->shadow_root_level, false, |
Borislav Petkov | b8291adc | 2016-03-29 17:41:58 +0200 | [diff] [blame] | 4604 | boot_cpu_has(X86_FEATURE_GBPAGES), |
| 4605 | true, true); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4606 | else |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4607 | __reset_rsvds_bits_mask_ept(shadow_zero_check, |
Kai Huang | f3ecb59 | 2019-05-03 03:08:53 -0700 | [diff] [blame] | 4608 | shadow_phys_bits, |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4609 | false); |
| 4610 | |
Brijesh Singh | ea2800d | 2017-08-25 15:55:40 -0500 | [diff] [blame] | 4611 | if (!shadow_me_mask) |
| 4612 | return; |
| 4613 | |
| 4614 | for (i = context->shadow_root_level; --i >= 0;) { |
| 4615 | shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; |
| 4616 | shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; |
| 4617 | } |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4618 | } |
| 4619 | |
| 4620 | /* |
| 4621 | * Same as the comments in reset_shadow_zero_bits_mask(), except this
| 4622 | * is the shadow page table for an Intel nested guest.
| 4623 | */ |
| 4624 | static void |
| 4625 | reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, |
| 4626 | struct kvm_mmu *context, bool execonly) |
| 4627 | { |
| 4628 | __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, |
Kai Huang | f3ecb59 | 2019-05-03 03:08:53 -0700 | [diff] [blame] | 4629 | shadow_phys_bits, execonly); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4630 | } |
| 4631 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4632 | #define BYTE_MASK(access) \ |
| 4633 | ((1 & (access) ? 2 : 0) | \ |
| 4634 | (2 & (access) ? 4 : 0) | \ |
| 4635 | (3 & (access) ? 8 : 0) | \ |
| 4636 | (4 & (access) ? 16 : 0) | \ |
| 4637 | (5 & (access) ? 32 : 0) | \ |
| 4638 | (6 & (access) ? 64 : 0) | \ |
| 4639 | (7 & (access) ? 128 : 0)) |
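BYTE_MASK(access) expands to an 8-bit map over all combinations of a guest PTE's U/W/X access bits: bit N of the result is set when combination N includes the permission named by access. The same construction as a loop, plus the three values the function below actually uses (assuming the usual ACC_EXEC=1, ACC_WRITE=2, ACC_USER=4 encoding):

#include <stdio.h>

static unsigned byte_mask(unsigned access)
{
        unsigned byte = 0, combo;

        for (combo = 1; combo < 8; combo++)   /* combo = the PTE's U/W/X bits */
                if (combo & access)
                        byte |= 1u << combo;
        return byte;
}

int main(void)
{
        printf("x=%#x w=%#x u=%#x\n",
               byte_mask(1), byte_mask(2), byte_mask(4));
        /* prints x=0xaa w=0xcc u=0xf0; e.g. ~w covers every combination lacking write */
        return 0;
}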
| 4640 | |
| 4641 | |
Xiao Guangrong | edc90b7 | 2015-05-11 22:55:21 +0800 | [diff] [blame] | 4642 | static void update_permission_bitmask(struct kvm_vcpu *vcpu, |
| 4643 | struct kvm_mmu *mmu, bool ept) |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4644 | { |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4645 | unsigned byte; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4646 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4647 | const u8 x = BYTE_MASK(ACC_EXEC_MASK); |
| 4648 | const u8 w = BYTE_MASK(ACC_WRITE_MASK); |
| 4649 | const u8 u = BYTE_MASK(ACC_USER_MASK); |
| 4650 | |
| 4651 | bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0; |
| 4652 | bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0; |
| 4653 | bool cr0_wp = is_write_protection(vcpu); |
| 4654 | |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4655 | for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4656 | unsigned pfec = byte << 1; |
| 4657 | |
Feng Wu | 97ec8c0 | 2014-04-01 17:46:34 +0800 | [diff] [blame] | 4658 | /* |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4659 | * Each "*f" variable has a 1 bit for each UWX value |
| 4660 | * that causes a fault with the given PFEC. |
Feng Wu | 97ec8c0 | 2014-04-01 17:46:34 +0800 | [diff] [blame] | 4661 | */ |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4662 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4663 | /* Faults from writes to non-writable pages */ |
Arnd Bergmann | a6a6d3b | 2019-07-12 11:12:30 +0200 | [diff] [blame] | 4664 | u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0; |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4665 | /* Faults from user mode accesses to supervisor pages */ |
Arnd Bergmann | a6a6d3b | 2019-07-12 11:12:30 +0200 | [diff] [blame] | 4666 | u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0; |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4667 | /* Faults from fetches of non-executable pages */
Arnd Bergmann | a6a6d3b | 2019-07-12 11:12:30 +0200 | [diff] [blame] | 4668 | u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0; |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4669 | /* Faults from kernel mode fetches of user pages */ |
| 4670 | u8 smepf = 0; |
| 4671 | /* Faults from kernel mode accesses of user pages */ |
| 4672 | u8 smapf = 0; |
Feng Wu | 97ec8c0 | 2014-04-01 17:46:34 +0800 | [diff] [blame] | 4673 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4674 | if (!ept) { |
| 4675 | /* Faults from kernel mode accesses to user pages */ |
| 4676 | u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4677 | |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4678 | /* Not really needed: !nx will cause pte.nx to fault */ |
| 4679 | if (!mmu->nx) |
| 4680 | ff = 0; |
| 4681 | |
| 4682 | /* Allow supervisor writes if !cr0.wp */ |
| 4683 | if (!cr0_wp) |
| 4684 | wf = (pfec & PFERR_USER_MASK) ? wf : 0; |
| 4685 | |
| 4686 | /* Disallow supervisor fetches of user code if cr4.smep */ |
| 4687 | if (cr4_smep) |
| 4688 | smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0; |
| 4689 | |
| 4690 | /* |
| 4691 | * SMAP: kernel-mode data accesses from user-mode
| 4692 | * mappings should fault. A fault is considered |
| 4693 | * as a SMAP violation if all of the following |
Peng Hao | 39337ad | 2018-10-04 11:45:00 -0400 | [diff] [blame] | 4694 | * conditions are true: |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4695 | * - X86_CR4_SMAP is set in CR4 |
| 4696 | * - A user page is accessed |
| 4697 | * - The access is not a fetch |
| 4698 | * - Page fault in kernel mode |
| 4699 | * - if CPL = 3 or X86_EFLAGS_AC is clear |
| 4700 | * |
| 4701 | * Here, we cover the first three conditions. |
| 4702 | * The fourth is computed dynamically in permission_fault(); |
| 4703 | * PFERR_RSVD_MASK bit will be set in PFEC if the access is |
| 4704 | * *not* subject to SMAP restrictions. |
| 4705 | */ |
| 4706 | if (cr4_smap) |
| 4707 | smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4708 | } |
Paolo Bonzini | 09f037a | 2017-08-24 17:37:25 +0200 | [diff] [blame] | 4709 | |
| 4710 | mmu->permissions[byte] = ff | uf | wf | smepf | smapf; |
Avi Kivity | 97d64b7 | 2012-09-12 14:52:00 +0300 | [diff] [blame] | 4711 | } |
| 4712 | } |
| 4713 | |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4714 | /* |
| 4715 | * PKU is an additional mechanism by which the paging controls access to |
| 4716 | * user-mode addresses based on the value in the PKRU register. Protection |
| 4717 | * key violations are reported through a bit in the page fault error code. |
| 4718 | * Unlike other bits of the error code, the PK bit is not known at the |
| 4719 | * call site of e.g. gva_to_gpa; it must be computed directly in |
| 4720 | * permission_fault based on two bits of PKRU, on some machine state (CR4, |
| 4721 | * CR0, EFER, CPL), and on other bits of the error code and the page tables. |
| 4722 | * |
| 4723 | * In particular the following conditions come from the error code, the |
| 4724 | * page tables and the machine state: |
| 4725 | * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1 |
| 4726 | * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch) |
| 4727 | * - PK is always zero if U=0 in the page tables |
| 4728 | * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access. |
| 4729 | * |
| 4730 | * The PKRU bitmask caches the result of these four conditions. The error |
| 4731 | * code (minus the P bit) and the page table's U bit form an index into the |
| 4732 | * PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed |
| 4733 | * with the two bits of the PKRU register corresponding to the protection key. |
| 4734 | * For the first three conditions above the bits will be 00, thus masking |
| 4735 | * away both AD and WD. For all reads or if the last condition holds, WD |
| 4736 | * only will be masked away. |
| 4737 | */ |
| 4738 | static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
| 4739 | bool ept) |
| 4740 | { |
| 4741 | unsigned bit; |
| 4742 | bool wp; |
| 4743 | |
| 4744 | if (ept) { |
| 4745 | mmu->pkru_mask = 0; |
| 4746 | return; |
| 4747 | } |
| 4748 | |
| 4749 | /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */ |
| 4750 | if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) { |
| 4751 | mmu->pkru_mask = 0; |
| 4752 | return; |
| 4753 | } |
| 4754 | |
| 4755 | wp = is_write_protection(vcpu); |
| 4756 | |
| 4757 | for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { |
| 4758 | unsigned pfec, pkey_bits; |
| 4759 | bool check_pkey, check_write, ff, uf, wf, pte_user; |
| 4760 | |
| 4761 | pfec = bit << 1; |
| 4762 | ff = pfec & PFERR_FETCH_MASK; |
| 4763 | uf = pfec & PFERR_USER_MASK; |
| 4764 | wf = pfec & PFERR_WRITE_MASK; |
| 4765 | |
| 4766 | /* PFEC.RSVD is replaced by ACC_USER_MASK. */ |
| 4767 | pte_user = pfec & PFERR_RSVD_MASK; |
| 4768 | |
| 4769 | /* |
| 4770 | * Only need to check the access which is not an |
| 4771 | * instruction fetch and is to a user page. |
| 4772 | */ |
| 4773 | check_pkey = (!ff && pte_user); |
| 4774 | /* |
| 4775 | * write access is controlled by PKRU if it is a |
| 4776 | * user access or CR0.WP = 1. |
| 4777 | */ |
| 4778 | check_write = check_pkey && wf && (uf || wp); |
| 4779 | |
| 4780 | /* PKRU.AD stops both read and write access. */ |
| 4781 | pkey_bits = !!check_pkey; |
| 4782 | /* PKRU.WD stops write access. */ |
| 4783 | pkey_bits |= (!!check_write) << 1; |
| 4784 | |
| 4785 | mmu->pkru_mask |= (pkey_bits & 3) << pfec; |
| 4786 | } |
| 4787 | } |
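At fault time the cached pkru_mask is consulted roughly as the long comment above describes: the error code minus the P bit, with the RSVD position carrying the page table's U bit, selects a 2-bit entry that is ANDed with the AD/WD bits of the relevant protection key. A hedged sketch of that lookup; pkey_denies is an invented name, and the real check lives in permission_fault() as noted above.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only.  Error-code bits assumed: P=1, W=2, U=4, RSVD=8, F=16;
 * PKRU holds AD/WD pairs, two bits per protection key.
 */
static int pkey_denies(uint32_t pkru_mask, uint32_t pkru,
                       unsigned pte_pkey, unsigned pfec, int pte_user)
{
        uint32_t key_bits = (pkru >> (pte_pkey * 2)) & 3;   /* this key's AD, WD */
        /* Drop P and RSVD from the error code, reuse the RSVD slot for "user page". */
        unsigned offset = (pfec & ~(1u | 8u)) | (pte_user ? 8u : 0);

        return ((pkru_mask >> offset) & key_bits) != 0;
}

int main(void)
{
        /* Toy values: entry 3 cached for "kernel write to a user page", assuming CR0.WP=1. */
        uint32_t mask = 3u << 10;   /* offset 10 = write fault (2) + user-page bit (8) */
        uint32_t pkru = 1u << 3;    /* protection key 1: WD set */

        printf("%d\n", pkey_denies(mask, pkru, 1, 2 /* write fault */, 1));  /* 1: denied */
        return 0;
}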
| 4788 | |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4789 | static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4790 | { |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4791 | unsigned root_level = mmu->root_level; |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4792 | |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4793 | mmu->last_nonleaf_level = root_level; |
| 4794 | if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu)) |
| 4795 | mmu->last_nonleaf_level++; |
Avi Kivity | 6fd01b7 | 2012-09-12 20:46:56 +0300 | [diff] [blame] | 4796 | } |
| 4797 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4798 | static void paging64_init_context_common(struct kvm_vcpu *vcpu, |
| 4799 | struct kvm_mmu *context, |
| 4800 | int level) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4801 | { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4802 | context->nx = is_nx(vcpu); |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4803 | context->root_level = level; |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4804 | |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4805 | reset_rsvds_bits_mask(vcpu, context); |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4806 | update_permission_bitmask(vcpu, context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4807 | update_pkru_bitmask(vcpu, context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4808 | update_last_nonleaf_level(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4809 | |
Paolo Bonzini | fa4a2c0 | 2013-10-02 16:56:16 +0200 | [diff] [blame] | 4810 | MMU_WARN_ON(!is_pae(vcpu)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4811 | context->page_fault = paging64_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4812 | context->gva_to_gpa = paging64_gva_to_gpa; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 4813 | context->sync_page = paging64_sync_page; |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 4814 | context->invlpg = paging64_invlpg; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 4815 | context->update_pte = paging64_update_pte; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 4816 | context->shadow_root_level = level; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 4817 | context->direct_map = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4818 | } |
| 4819 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4820 | static void paging64_init_context(struct kvm_vcpu *vcpu, |
| 4821 | struct kvm_mmu *context) |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 4822 | { |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4823 | int root_level = is_la57_mode(vcpu) ? |
| 4824 | PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; |
| 4825 | |
| 4826 | paging64_init_context_common(vcpu, context, root_level); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 4827 | } |
| 4828 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4829 | static void paging32_init_context(struct kvm_vcpu *vcpu, |
| 4830 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4831 | { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4832 | context->nx = false; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4833 | context->root_level = PT32_ROOT_LEVEL; |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4834 | |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4835 | reset_rsvds_bits_mask(vcpu, context); |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4836 | update_permission_bitmask(vcpu, context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4837 | update_pkru_bitmask(vcpu, context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4838 | update_last_nonleaf_level(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4839 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4840 | context->page_fault = paging32_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4841 | context->gva_to_gpa = paging32_gva_to_gpa; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 4842 | context->sync_page = paging32_sync_page; |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 4843 | context->invlpg = paging32_invlpg; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 4844 | context->update_pte = paging32_update_pte; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4845 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 4846 | context->direct_map = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4847 | } |
| 4848 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4849 | static void paging32E_init_context(struct kvm_vcpu *vcpu, |
| 4850 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4851 | { |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4852 | paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4853 | } |
| 4854 | |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4855 | static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4856 | { |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4857 | union kvm_mmu_extended_role ext = {0}; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4858 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4859 | ext.cr0_pg = !!is_paging(vcpu); |
Vitaly Kuznetsov | 0699c64 | 2019-04-30 19:33:26 +0200 | [diff] [blame] | 4860 | ext.cr4_pae = !!is_pae(vcpu); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4861 | ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); |
| 4862 | ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); |
| 4863 | ext.cr4_pse = !!is_pse(vcpu); |
| 4864 | ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); |
Yu Zhang | de3ccd2 | 2019-02-01 00:09:23 +0800 | [diff] [blame] | 4865 | ext.maxphyaddr = cpuid_maxphyaddr(vcpu); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 4866 | |
| 4867 | ext.valid = 1; |
| 4868 | |
| 4869 | return ext; |
| 4870 | } |
| 4871 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4872 | static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, |
| 4873 | bool base_only) |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4874 | { |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4875 | union kvm_mmu_role role = {0}; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4876 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4877 | role.base.access = ACC_ALL; |
| 4878 | role.base.nxe = !!is_nx(vcpu); |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4879 | role.base.cr0_wp = is_write_protection(vcpu); |
| 4880 | role.base.smm = is_smm(vcpu); |
| 4881 | role.base.guest_mode = is_guest_mode(vcpu); |
| 4882 | |
| 4883 | if (base_only) |
| 4884 | return role; |
| 4885 | |
| 4886 | role.ext = kvm_calc_mmu_role_ext(vcpu); |
| 4887 | |
| 4888 | return role; |
| 4889 | } |
| 4890 | |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 4891 | static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu) |
| 4892 | { |
| 4893 | /* Use 5-level TDP if and only if it's useful/necessary. */ |
Sean Christopherson | 8301305 | 2020-07-15 20:41:22 -0700 | [diff] [blame] | 4894 | if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48) |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 4895 | return 4; |
| 4896 | |
Sean Christopherson | 8301305 | 2020-07-15 20:41:22 -0700 | [diff] [blame] | 4897 | return max_tdp_level; |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 4898 | } |
| 4899 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4900 | static union kvm_mmu_role |
| 4901 | kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) |
| 4902 | { |
| 4903 | union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); |
| 4904 | |
| 4905 | role.base.ad_disabled = (shadow_accessed_mask == 0); |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 4906 | role.base.level = kvm_mmu_get_tdp_level(vcpu); |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4907 | role.base.direct = true; |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 4908 | role.base.gpte_is_8_bytes = true; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4909 | |
| 4910 | return role; |
| 4911 | } |
| 4912 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4913 | static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4914 | { |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 4915 | struct kvm_mmu *context = &vcpu->arch.root_mmu; |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4916 | union kvm_mmu_role new_role = |
| 4917 | kvm_calc_tdp_mmu_root_page_role(vcpu, false); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4918 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4919 | if (new_role.as_u64 == context->mmu_role.as_u64) |
| 4920 | return; |
| 4921 | |
| 4922 | context->mmu_role.as_u64 = new_role.as_u64; |
Sean Christopherson | 7a02674 | 2020-02-06 14:14:34 -0800 | [diff] [blame] | 4923 | context->page_fault = kvm_tdp_page_fault; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 4924 | context->sync_page = nonpaging_sync_page; |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 4925 | context->invlpg = NULL; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 4926 | context->update_pte = nonpaging_update_pte; |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 4927 | context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu); |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 4928 | context->direct_map = true; |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 4929 | context->get_guest_pgd = get_cr3; |
Avi Kivity | e4e517b | 2011-07-28 11:36:17 +0300 | [diff] [blame] | 4930 | context->get_pdptr = kvm_pdptr_read; |
Joerg Roedel | cb659db | 2010-09-10 17:30:43 +0200 | [diff] [blame] | 4931 | context->inject_page_fault = kvm_inject_page_fault; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4932 | |
| 4933 | if (!is_paging(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4934 | context->nx = false; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4935 | context->gva_to_gpa = nonpaging_gva_to_gpa; |
| 4936 | context->root_level = 0; |
| 4937 | } else if (is_long_mode(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4938 | context->nx = is_nx(vcpu); |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 4939 | context->root_level = is_la57_mode(vcpu) ? |
| 4940 | PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4941 | reset_rsvds_bits_mask(vcpu, context); |
| 4942 | context->gva_to_gpa = paging64_gva_to_gpa; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4943 | } else if (is_pae(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4944 | context->nx = is_nx(vcpu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4945 | context->root_level = PT32E_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4946 | reset_rsvds_bits_mask(vcpu, context); |
| 4947 | context->gva_to_gpa = paging64_gva_to_gpa; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4948 | } else { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 4949 | context->nx = false; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4950 | context->root_level = PT32_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 4951 | reset_rsvds_bits_mask(vcpu, context); |
| 4952 | context->gva_to_gpa = paging32_gva_to_gpa; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4953 | } |
| 4954 | |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 4955 | update_permission_bitmask(vcpu, context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 4956 | update_pkru_bitmask(vcpu, context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 4957 | update_last_nonleaf_level(vcpu, context); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 4958 | reset_tdp_shadow_zero_bits_mask(vcpu, context); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 4959 | } |
| 4960 | |
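| | /* |
| | * Compute the role bits common to all shadow MMUs: the SMEP/SMAP |
| | * "and not write-protect" combinations and the guest PTE size, |
| | * which is 8 bytes when the guest has CR4.PAE set. |
| | */ |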
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4961 | static union kvm_mmu_role |
Sean Christopherson | 59505b5 | 2020-07-15 20:41:15 -0700 | [diff] [blame] | 4962 | kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4963 | { |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4964 | union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 4965 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4966 | role.base.smep_andnot_wp = role.ext.cr4_smep && |
| 4967 | !is_write_protection(vcpu); |
| 4968 | role.base.smap_andnot_wp = role.ext.cr4_smap && |
| 4969 | !is_write_protection(vcpu); |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 4970 | role.base.gpte_is_8_bytes = !!is_pae(vcpu); |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4971 | |
Sean Christopherson | 59505b5 | 2020-07-15 20:41:15 -0700 | [diff] [blame] | 4972 | return role; |
| 4973 | } |
| 4974 | |
| 4975 | static union kvm_mmu_role |
| 4976 | kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) |
| 4977 | { |
| 4978 | union kvm_mmu_role role = |
| 4979 | kvm_calc_shadow_root_page_role_common(vcpu, base_only); |
| 4980 | |
| 4981 | role.base.direct = !is_paging(vcpu); |
| 4982 | |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4983 | if (!is_long_mode(vcpu)) |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4984 | role.base.level = PT32E_ROOT_LEVEL; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4985 | else if (is_la57_mode(vcpu)) |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4986 | role.base.level = PT64_ROOT_5LEVEL; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4987 | else |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 4988 | role.base.level = PT64_ROOT_4LEVEL; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4989 | |
| 4990 | return role; |
| 4991 | } |
| 4992 | |
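| | /* |
| | * Initialize the shadow MMU for the paging mode selected by the |
| | * guest's CR0.PG, EFER.LMA and CR4.PAE, then stamp the new role and |
| | * recompute the shadow zero/reserved bits for the resulting context. |
| | */ |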
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 4993 | static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context, |
| 4994 | u32 cr0, u32 cr4, u32 efer, |
| 4995 | union kvm_mmu_role new_role) |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 4996 | { |
Paolo Bonzini | 929d1cf | 2020-05-19 06:18:31 -0400 | [diff] [blame] | 4997 | if (!(cr0 & X86_CR0_PG)) |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 4998 | nonpaging_init_context(vcpu, context); |
Paolo Bonzini | 929d1cf | 2020-05-19 06:18:31 -0400 | [diff] [blame] | 4999 | else if (efer & EFER_LMA) |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 5000 | paging64_init_context(vcpu, context); |
Paolo Bonzini | 929d1cf | 2020-05-19 06:18:31 -0400 | [diff] [blame] | 5001 | else if (cr4 & X86_CR4_PAE) |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 5002 | paging32E_init_context(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5003 | else |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 5004 | paging32_init_context(vcpu, context); |
Avi Kivity | a770f6f | 2008-12-21 19:20:09 +0200 | [diff] [blame] | 5005 | |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 5006 | context->mmu_role.as_u64 = new_role.as_u64; |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 5007 | reset_shadow_zero_bits_mask(vcpu, context); |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 5008 | } |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 5009 | |
| 5010 | static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer) |
| 5011 | { |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 5012 | struct kvm_mmu *context = &vcpu->arch.root_mmu; |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 5013 | union kvm_mmu_role new_role = |
| 5014 | kvm_calc_shadow_mmu_root_page_role(vcpu, false); |
| 5015 | |
| 5016 | if (new_role.as_u64 != context->mmu_role.as_u64) |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 5017 | shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role); |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 5018 | } |
| 5019 | |
Sean Christopherson | 59505b5 | 2020-07-15 20:41:15 -0700 | [diff] [blame] | 5020 | static union kvm_mmu_role |
| 5021 | kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu) |
| 5022 | { |
| 5023 | union kvm_mmu_role role = |
| 5024 | kvm_calc_shadow_root_page_role_common(vcpu, false); |
| 5025 | |
| 5026 | role.base.direct = false; |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 5027 | role.base.level = kvm_mmu_get_tdp_level(vcpu); |
Sean Christopherson | 59505b5 | 2020-07-15 20:41:15 -0700 | [diff] [blame] | 5028 | |
| 5029 | return role; |
| 5030 | } |
| 5031 | |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 5032 | void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer, |
| 5033 | gpa_t nested_cr3) |
| 5034 | { |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 5035 | struct kvm_mmu *context = &vcpu->arch.guest_mmu; |
Sean Christopherson | 59505b5 | 2020-07-15 20:41:15 -0700 | [diff] [blame] | 5036 | union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu); |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 5037 | |
Sean Christopherson | 096586fd | 2020-07-15 20:41:14 -0700 | [diff] [blame] | 5038 | context->shadow_root_level = new_role.base.level; |
| 5039 | |
Vitaly Kuznetsov | a506fdd | 2020-07-10 16:11:55 +0200 | [diff] [blame] | 5040 | __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false); |
| 5041 | |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 5042 | if (new_role.as_u64 != context->mmu_role.as_u64) |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 5043 | shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role); |
Vitaly Kuznetsov | 0f04a2a | 2020-07-10 16:11:49 +0200 | [diff] [blame] | 5044 | } |
| 5045 | EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu); |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 5046 | |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5047 | static union kvm_mmu_role |
| 5048 | kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 5049 | bool execonly, u8 level) |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5050 | { |
Sean Christopherson | 552c69b1 | 2019-03-07 15:27:43 -0800 | [diff] [blame] | 5051 | union kvm_mmu_role role = {0}; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5052 | |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5053 | /* SMM flag is inherited from root_mmu */ |
| 5054 | role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5055 | |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 5056 | role.base.level = level; |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5057 | role.base.gpte_is_8_bytes = true; |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5058 | role.base.direct = false; |
| 5059 | role.base.ad_disabled = !accessed_dirty; |
| 5060 | role.base.guest_mode = true; |
| 5061 | role.base.access = ACC_ALL; |
| 5062 | |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5063 | /* |
| 5064 | * WP=1 and NOT_WP=1 is an impossible combination, use WP and the |
| 5065 | * SMAP variation to denote shadow EPT entries. |
| 5066 | */ |
| 5067 | role.base.cr0_wp = true; |
| 5068 | role.base.smap_andnot_wp = true; |
| 5069 | |
Sean Christopherson | 552c69b1 | 2019-03-07 15:27:43 -0800 | [diff] [blame] | 5070 | role.ext = kvm_calc_mmu_role_ext(vcpu); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5071 | role.ext.execonly = execonly; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5072 | |
| 5073 | return role; |
| 5074 | } |
| 5075 | |
Paolo Bonzini | ae1e2d1 | 2017-03-30 11:55:30 +0200 | [diff] [blame] | 5076 | void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, |
Junaid Shahid | 50c28f2 | 2018-06-27 14:59:11 -0700 | [diff] [blame] | 5077 | bool accessed_dirty, gpa_t new_eptp) |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5078 | { |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 5079 | struct kvm_mmu *context = &vcpu->arch.guest_mmu; |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 5080 | u8 level = vmx_eptp_page_walk_level(new_eptp); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5081 | union kvm_mmu_role new_role = |
| 5082 | kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty, |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 5083 | execonly, level); |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 5084 | |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 5085 | __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true); |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5086 | |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5087 | if (new_role.as_u64 == context->mmu_role.as_u64) |
| 5088 | return; |
| 5089 | |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 5090 | context->shadow_root_level = level; |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5091 | |
| 5092 | context->nx = true; |
Paolo Bonzini | ae1e2d1 | 2017-03-30 11:55:30 +0200 | [diff] [blame] | 5093 | context->ept_ad = accessed_dirty; |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5094 | context->page_fault = ept_page_fault; |
| 5095 | context->gva_to_gpa = ept_gva_to_gpa; |
| 5096 | context->sync_page = ept_sync_page; |
| 5097 | context->invlpg = ept_invlpg; |
| 5098 | context->update_pte = ept_update_pte; |
Sean Christopherson | bb1fcc7 | 2020-03-02 18:02:36 -0800 | [diff] [blame] | 5099 | context->root_level = level; |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5100 | context->direct_map = false; |
Vitaly Kuznetsov | a336282 | 2018-10-08 21:28:11 +0200 | [diff] [blame] | 5101 | context->mmu_role.as_u64 = new_role.as_u64; |
Vitaly Kuznetsov | 3dc773e | 2018-10-08 21:28:06 +0200 | [diff] [blame] | 5102 | |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5103 | update_permission_bitmask(vcpu, context, true); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 5104 | update_pkru_bitmask(vcpu, context, true); |
Ladi Prosek | fd19d3b4 | 2017-10-05 11:10:22 +0200 | [diff] [blame] | 5105 | update_last_nonleaf_level(vcpu, context); |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5106 | reset_rsvds_bits_mask_ept(vcpu, context, execonly); |
Xiao Guangrong | c258b62 | 2015-08-05 12:04:24 +0800 | [diff] [blame] | 5107 | reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); |
Nadav Har'El | 155a97a | 2013-08-05 11:07:16 +0300 | [diff] [blame] | 5108 | } |
| 5109 | EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); |
| 5110 | |
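| | /* |
| | * Set up the non-nested shadow MMU, pulling CR0.PG, CR4.PAE and EFER |
| | * from the vCPU's current register state. |
| | */ |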
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 5111 | static void init_kvm_softmmu(struct kvm_vcpu *vcpu) |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 5112 | { |
Paolo Bonzini | 8c00865 | 2020-07-10 16:11:50 +0200 | [diff] [blame] | 5113 | struct kvm_mmu *context = &vcpu->arch.root_mmu; |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 5114 | |
Paolo Bonzini | 929d1cf | 2020-05-19 06:18:31 -0400 | [diff] [blame] | 5115 | kvm_init_shadow_mmu(vcpu, |
| 5116 | kvm_read_cr0_bits(vcpu, X86_CR0_PG), |
| 5117 | kvm_read_cr4_bits(vcpu, X86_CR4_PAE), |
| 5118 | vcpu->arch.efer); |
| 5119 | |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 5120 | context->get_guest_pgd = get_cr3; |
Paolo Bonzini | ad896af | 2013-10-02 16:56:14 +0200 | [diff] [blame] | 5121 | context->get_pdptr = kvm_pdptr_read; |
| 5122 | context->inject_page_fault = kvm_inject_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5123 | } |
| 5124 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 5125 | static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5126 | { |
Vitaly Kuznetsov | bf627a9 | 2018-10-08 21:28:13 +0200 | [diff] [blame] | 5127 | union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5128 | struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; |
| 5129 | |
Vitaly Kuznetsov | bf627a9 | 2018-10-08 21:28:13 +0200 | [diff] [blame] | 5130 | if (new_role.as_u64 == g_context->mmu_role.as_u64) |
| 5131 | return; |
| 5132 | |
| 5133 | g_context->mmu_role.as_u64 = new_role.as_u64; |
Sean Christopherson | d8dd54e | 2020-03-02 18:02:39 -0800 | [diff] [blame] | 5134 | g_context->get_guest_pgd = get_cr3; |
Avi Kivity | e4e517b | 2011-07-28 11:36:17 +0300 | [diff] [blame] | 5135 | g_context->get_pdptr = kvm_pdptr_read; |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5136 | g_context->inject_page_fault = kvm_inject_page_fault; |
| 5137 | |
| 5138 | /* |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5139 | * L2 page tables are never shadowed, so there is no need to sync |
| 5140 | * SPTEs. |
| 5141 | */ |
| 5142 | g_context->invlpg = NULL; |
| 5143 | |
| 5144 | /* |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5145 | * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using |
David Matlack | 0af2593 | 2015-12-30 08:26:17 -0800 | [diff] [blame] | 5146 | * L1's nested page tables (e.g. EPT12). The nested translation |
| 5147 | * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using |
| 5148 | * L2's page tables as the first level of translation and L1's |
| 5149 | * nested page tables as the second level of translation. Basically |
| 5150 | * the gva_to_gpa functions between mmu and nested_mmu are swapped. |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5151 | */ |
| 5152 | if (!is_paging(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 5153 | g_context->nx = false; |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5154 | g_context->root_level = 0; |
| 5155 | g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; |
| 5156 | } else if (is_long_mode(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 5157 | g_context->nx = is_nx(vcpu); |
Yu Zhang | 855feb6 | 2017-08-24 20:27:55 +0800 | [diff] [blame] | 5158 | g_context->root_level = is_la57_mode(vcpu) ? |
| 5159 | PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 5160 | reset_rsvds_bits_mask(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5161 | g_context->gva_to_gpa = paging64_gva_to_gpa_nested; |
| 5162 | } else if (is_pae(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 5163 | g_context->nx = is_nx(vcpu); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5164 | g_context->root_level = PT32E_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 5165 | reset_rsvds_bits_mask(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5166 | g_context->gva_to_gpa = paging64_gva_to_gpa_nested; |
| 5167 | } else { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 5168 | g_context->nx = false; |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5169 | g_context->root_level = PT32_ROOT_LEVEL; |
Davidlohr Bueso | 4d6931c | 2012-03-05 16:53:06 +0100 | [diff] [blame] | 5170 | reset_rsvds_bits_mask(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5171 | g_context->gva_to_gpa = paging32_gva_to_gpa_nested; |
| 5172 | } |
| 5173 | |
Yang Zhang | 25d9208 | 2013-08-06 12:00:32 +0300 | [diff] [blame] | 5174 | update_permission_bitmask(vcpu, g_context, false); |
Huaitong Han | 2d34410 | 2016-03-22 16:51:19 +0800 | [diff] [blame] | 5175 | update_pkru_bitmask(vcpu, g_context, false); |
Paolo Bonzini | 6bb69c9 | 2016-02-23 12:51:19 +0100 | [diff] [blame] | 5176 | update_last_nonleaf_level(vcpu, g_context); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5177 | } |
| 5178 | |
Junaid Shahid | 1c53da3 | 2018-06-27 14:59:10 -0700 | [diff] [blame] | 5179 | void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots) |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 5180 | { |
Junaid Shahid | 1c53da3 | 2018-06-27 14:59:10 -0700 | [diff] [blame] | 5181 | if (reset_roots) { |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5182 | uint i; |
| 5183 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5184 | vcpu->arch.mmu->root_hpa = INVALID_PAGE; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5185 | |
| 5186 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5187 | vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; |
Junaid Shahid | 1c53da3 | 2018-06-27 14:59:10 -0700 | [diff] [blame] | 5188 | } |
| 5189 | |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5190 | if (mmu_is_nested(vcpu)) |
Paolo Bonzini | e0c6db3 | 2014-12-23 13:39:46 +0100 | [diff] [blame] | 5191 | init_kvm_nested_mmu(vcpu); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 5192 | else if (tdp_enabled) |
Paolo Bonzini | e0c6db3 | 2014-12-23 13:39:46 +0100 | [diff] [blame] | 5193 | init_kvm_tdp_mmu(vcpu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 5194 | else |
Paolo Bonzini | e0c6db3 | 2014-12-23 13:39:46 +0100 | [diff] [blame] | 5195 | init_kvm_softmmu(vcpu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 5196 | } |
Junaid Shahid | 1c53da3 | 2018-06-27 14:59:10 -0700 | [diff] [blame] | 5197 | EXPORT_SYMBOL_GPL(kvm_init_mmu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 5198 | |
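| | /* Compute the base page role for the current root, TDP or shadow as appropriate. */ |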
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5199 | static union kvm_mmu_page_role |
| 5200 | kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu) |
| 5201 | { |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 5202 | union kvm_mmu_role role; |
| 5203 | |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5204 | if (tdp_enabled) |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 5205 | role = kvm_calc_tdp_mmu_root_page_role(vcpu, true); |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5206 | else |
Vitaly Kuznetsov | 7dcd575 | 2018-10-08 21:28:12 +0200 | [diff] [blame] | 5207 | role = kvm_calc_shadow_mmu_root_page_role(vcpu, true); |
| 5208 | |
| 5209 | return role.base; |
Junaid Shahid | 9fa7211 | 2018-06-27 14:59:07 -0700 | [diff] [blame] | 5210 | } |
Dong, Eddie | 489f1d6 | 2008-01-07 11:14:20 +0200 | [diff] [blame] | 5211 | |
Paolo Bonzini | 8a3c1a33 | 2013-10-02 16:56:13 +0200 | [diff] [blame] | 5212 | void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 5213 | { |
Paolo Bonzini | 95f93af | 2013-10-02 16:56:12 +0200 | [diff] [blame] | 5214 | kvm_mmu_unload(vcpu); |
Junaid Shahid | 1c53da3 | 2018-06-27 14:59:10 -0700 | [diff] [blame] | 5215 | kvm_init_mmu(vcpu, true); |
Eddie Dong | 8668a3c | 2007-10-10 14:26:45 +0800 | [diff] [blame] | 5216 | } |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 5217 | EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); |
| 5218 | |
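| | /* |
| | * (Re)load the MMU: top up the memory caches, allocate and sync the |
| | * roots, then load the new root into hardware and flush the current |
| | * TLB context. |
| | */ |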
| 5219 | int kvm_mmu_load(struct kvm_vcpu *vcpu) |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 5220 | { |
| 5221 | int r; |
Avi Kivity | e2dec93 | 2007-01-05 16:36:54 -0800 | [diff] [blame] | 5222 | |
Sean Christopherson | 378f5cd | 2020-07-02 19:35:36 -0700 | [diff] [blame] | 5223 | r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map); |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 5224 | if (r) |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 5225 | goto out; |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 5226 | r = mmu_alloc_roots(vcpu); |
Takuya Yoshikawa | e2858b4 | 2013-05-09 15:45:12 +0900 | [diff] [blame] | 5227 | kvm_mmu_sync_roots(vcpu); |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 5228 | if (r) |
| 5229 | goto out; |
Paolo Bonzini | 727a7e2 | 2020-03-05 03:52:50 -0500 | [diff] [blame] | 5230 | kvm_mmu_load_pgd(vcpu); |
Sean Christopherson | 8c8560b | 2020-03-20 14:28:21 -0700 | [diff] [blame] | 5231 | kvm_x86_ops.tlb_flush_current(vcpu); |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 5232 | out: |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5233 | return r; |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 5234 | } |
| 5235 | EXPORT_SYMBOL_GPL(kvm_mmu_load); |
| 5236 | |
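| | /* Free all roots of both the root MMU and the guest (nested) MMU. */ |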
| 5237 | void kvm_mmu_unload(struct kvm_vcpu *vcpu) |
| 5238 | { |
Vitaly Kuznetsov | 14c07ad | 2018-10-08 21:28:08 +0200 | [diff] [blame] | 5239 | kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); |
| 5240 | WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa)); |
| 5241 | kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); |
| 5242 | WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5243 | } |
Joerg Roedel | 4b16184 | 2010-09-10 17:31:03 +0200 | [diff] [blame] | 5244 | EXPORT_SYMBOL_GPL(kvm_mmu_unload); |
Avi Kivity | 09072da | 2007-05-01 14:16:52 +0300 | [diff] [blame] | 5245 | |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 5246 | static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, |
Xiao Guangrong | 7c56252 | 2011-03-28 10:29:27 +0800 | [diff] [blame] | 5247 | struct kvm_mmu_page *sp, u64 *spte, |
| 5248 | const void *new) |
Avi Kivity | 0028425 | 2007-05-01 16:53:31 +0300 | [diff] [blame] | 5249 | { |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 5250 | if (sp->role.level != PG_LEVEL_4K) { |
Joerg Roedel | 7e4e405 | 2009-07-27 16:30:46 +0200 | [diff] [blame] | 5251 | ++vcpu->kvm->stat.mmu_pde_zapped; |
| 5252 | return; |
Marcelo Tosatti | 3094538 | 2008-06-11 20:32:40 -0300 | [diff] [blame] | 5253 | } |
Avi Kivity | 0028425 | 2007-05-01 16:53:31 +0300 | [diff] [blame] | 5254 | |
Avi Kivity | 4cee576 | 2007-11-18 16:37:07 +0200 | [diff] [blame] | 5255 | ++vcpu->kvm->stat.mmu_pte_updated; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5256 | vcpu->arch.mmu->update_pte(vcpu, sp, spte, new); |
Avi Kivity | 0028425 | 2007-05-01 16:53:31 +0300 | [diff] [blame] | 5257 | } |
| 5258 | |
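| | /* |
| | * Return true if changing an SPTE from 'old' to 'new' requires a remote |
| | * TLB flush, i.e. if a previously present SPTE now points elsewhere or |
| | * has had permissions removed. |
| | */ |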
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 5259 | static bool need_remote_flush(u64 old, u64 new) |
| 5260 | { |
| 5261 | if (!is_shadow_present_pte(old)) |
| 5262 | return false; |
| 5263 | if (!is_shadow_present_pte(new)) |
| 5264 | return true; |
| 5265 | if ((old ^ new) & PT64_BASE_ADDR_MASK) |
| 5266 | return true; |
Gleb Natapov | 5316622 | 2013-08-05 11:07:14 +0300 | [diff] [blame] | 5267 | old ^= shadow_nx_mask; |
| 5268 | new ^= shadow_nx_mask; |
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 5269 | return (old & ~new & PT64_PERM_MASK) != 0; |
| 5270 | } |
| 5271 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5272 | static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 5273 | int *bytes) |
Avi Kivity | da4a00f | 2007-01-05 16:36:44 -0800 | [diff] [blame] | 5274 | { |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 5275 | u64 gentry = 0; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5276 | int r; |
Avi Kivity | 72016f3 | 2010-03-15 13:59:53 +0200 | [diff] [blame] | 5277 | |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 5278 | /* |
| 5279 | * Assume that the pte write is on a page table of the same type |
Xiao Guangrong | 49b26e2 | 2011-03-04 19:00:00 +0800 | [diff] [blame] | 5280 | * as the current vcpu paging mode, since we update the sptes only |
| 5281 | * when they have the same mode. |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 5282 | */ |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5283 | if (is_pae(vcpu) && *bytes == 4) { |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 5284 | /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5285 | *gpa &= ~(gpa_t)7; |
| 5286 | *bytes = 8; |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 5287 | } |
| 5288 | |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 5289 | if (*bytes == 4 || *bytes == 8) { |
| 5290 | r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes); |
| 5291 | if (r) |
| 5292 | gentry = 0; |
Avi Kivity | 72016f3 | 2010-03-15 13:59:53 +0200 | [diff] [blame] | 5293 | } |
| 5294 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5295 | return gentry; |
| 5296 | } |
| 5297 | |
| 5298 | /* |
| 5299 | * If we're seeing too many writes to a page, it may no longer be a page table, |
| 5300 | * or we may be forking, in which case it is better to unmap the page. |
| 5301 | */ |
Xiao Guangrong | a138fe7 | 2011-12-16 18:18:10 +0800 | [diff] [blame] | 5302 | static bool detect_write_flooding(struct kvm_mmu_page *sp) |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5303 | { |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 5304 | /* |
| 5305 | * Skip write-flooding detection for the sp whose level is 1, because |
| 5306 | * it can become unsync, in which case the guest page is not write-protected. |
| 5307 | */ |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 5308 | if (sp->role.level == PG_LEVEL_4K) |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 5309 | return false; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5310 | |
Xiao Guangrong | e5691a8 | 2016-02-24 17:51:12 +0800 | [diff] [blame] | 5311 | atomic_inc(&sp->write_flooding_count); |
| 5312 | return atomic_read(&sp->write_flooding_count) >= 3; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5313 | } |
| 5314 | |
| 5315 | /* |
| 5316 | * Misaligned accesses are too much trouble to fix up; also, they usually |
| 5317 | * indicate a page is not used as a page table. |
| 5318 | */ |
| 5319 | static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, |
| 5320 | int bytes) |
| 5321 | { |
| 5322 | unsigned offset, pte_size, misaligned; |
| 5323 | |
| 5324 | pgprintk("misaligned: gpa %llx bytes %d role %x\n", |
| 5325 | gpa, bytes, sp->role.word); |
| 5326 | |
| 5327 | offset = offset_in_page(gpa); |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5328 | pte_size = sp->role.gpte_is_8_bytes ? 8 : 4; |
Xiao Guangrong | 5d9ca30 | 2011-09-22 16:57:55 +0800 | [diff] [blame] | 5329 | |
| 5330 | /* |
| 5331 | * Sometimes, the OS only writes the last byte to update status |
| 5332 | * bits; for example, Linux uses the andb instruction in clear_bit(). |
| 5333 | */ |
| 5334 | if (!(offset & (pte_size - 1)) && bytes == 1) |
| 5335 | return false; |
| 5336 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5337 | misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); |
| 5338 | misaligned |= bytes < 4; |
| 5339 | |
| 5340 | return misaligned; |
| 5341 | } |
| 5342 | |
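| | /* Locate the shadow PTE(s) in @sp that correspond to the guest PTE write at @gpa. */ |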
| 5343 | static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) |
| 5344 | { |
| 5345 | unsigned page_offset, quadrant; |
| 5346 | u64 *spte; |
| 5347 | int level; |
| 5348 | |
| 5349 | page_offset = offset_in_page(gpa); |
| 5350 | level = sp->role.level; |
| 5351 | *nspte = 1; |
Sean Christopherson | 47c42e6 | 2019-03-07 15:27:44 -0800 | [diff] [blame] | 5352 | if (!sp->role.gpte_is_8_bytes) { |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5353 | page_offset <<= 1; /* 32->64 */ |
| 5354 | /* |
| 5355 | * A 32-bit pde maps 4MB while the shadow pdes map |
| 5356 | * only 2MB. So we need to double the offset again |
| 5357 | * and zap two pdes instead of one. |
| 5358 | */ |
| 5359 | if (level == PT32_ROOT_LEVEL) { |
| 5360 | page_offset &= ~7; /* kill rounding error */ |
| 5361 | page_offset <<= 1; |
| 5362 | *nspte = 2; |
| 5363 | } |
| 5364 | quadrant = page_offset >> PAGE_SHIFT; |
| 5365 | page_offset &= ~PAGE_MASK; |
| 5366 | if (quadrant != sp->role.quadrant) |
| 5367 | return NULL; |
| 5368 | } |
| 5369 | |
| 5370 | spte = &sp->spt[page_offset / sizeof(*spte)]; |
| 5371 | return spte; |
| 5372 | } |
| 5373 | |
Sean Christopherson | a102a67 | 2020-03-02 18:02:34 -0800 | [diff] [blame] | 5374 | /* |
| 5375 | * Ignore various flags when determining if a SPTE can be immediately |
| 5376 | * overwritten for the current MMU. |
| 5377 | * - level: explicitly checked in mmu_pte_write_new_pte(), and will never |
| 5378 | * match the current MMU role, as MMU's level tracks the root level. |
| 5379 | * - access: updated based on the new guest PTE |
| 5380 | * - quadrant: handled by get_written_sptes() |
| 5381 | * - invalid: always false (loop only walks valid shadow pages) |
| 5382 | */ |
| 5383 | static const union kvm_mmu_page_role role_ign = { |
| 5384 | .level = 0xf, |
| 5385 | .access = 0x7, |
| 5386 | .quadrant = 0x3, |
| 5387 | .invalid = 0x1, |
| 5388 | }; |
| 5389 | |
Xiao Guangrong | 13d268c | 2016-02-24 17:51:16 +0800 | [diff] [blame] | 5390 | static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, |
Jike Song | d126363 | 2016-10-25 15:50:42 +0800 | [diff] [blame] | 5391 | const u8 *new, int bytes, |
| 5392 | struct kvm_page_track_notifier_node *node) |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5393 | { |
| 5394 | gfn_t gfn = gpa >> PAGE_SHIFT; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5395 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5396 | LIST_HEAD(invalid_list); |
| 5397 | u64 entry, gentry, *spte; |
| 5398 | int npte; |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5399 | bool remote_flush, local_flush; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5400 | |
| 5401 | /* |
| 5402 | * If we don't have indirect shadow pages, it means no page is |
| 5403 | * write-protected, so we can simply exit. |
| 5404 | */ |
Mark Rutland | 6aa7de0 | 2017-10-23 14:07:29 -0700 | [diff] [blame] | 5405 | if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5406 | return; |
| 5407 | |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5408 | remote_flush = local_flush = false; |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5409 | |
| 5410 | pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); |
| 5411 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5412 | /* |
| 5413 | * No need to care whether the memory cache allocation succeeds |
| 5414 | * or not, since pte prefetch is skipped if the cache does not have |
| 5415 | * enough objects. |
| 5416 | */ |
Sean Christopherson | 378f5cd | 2020-07-02 19:35:36 -0700 | [diff] [blame] | 5417 | mmu_topup_memory_caches(vcpu, true); |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5418 | |
| 5419 | spin_lock(&vcpu->kvm->mmu_lock); |
Junaid Shahid | 0e0fee5 | 2018-10-31 14:53:57 -0700 | [diff] [blame] | 5420 | |
| 5421 | gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); |
| 5422 | |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5423 | ++vcpu->kvm->stat.mmu_pte_write; |
Xiao Guangrong | 0375f7f | 2011-11-28 20:41:00 +0800 | [diff] [blame] | 5424 | kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5425 | |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 5426 | for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { |
Xiao Guangrong | a30f47c | 2011-09-22 16:58:36 +0800 | [diff] [blame] | 5427 | if (detect_write_misaligned(sp, gpa, bytes) || |
Xiao Guangrong | a138fe7 | 2011-12-16 18:18:10 +0800 | [diff] [blame] | 5428 | detect_write_flooding(sp)) { |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5429 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); |
Avi Kivity | 4cee576 | 2007-11-18 16:37:07 +0200 | [diff] [blame] | 5430 | ++vcpu->kvm->stat.mmu_flooded; |
Avi Kivity | 0e7bc4b | 2007-01-05 16:36:48 -0800 | [diff] [blame] | 5431 | continue; |
| 5432 | } |
Xiao Guangrong | 889e5cb | 2011-09-22 16:57:23 +0800 | [diff] [blame] | 5433 | |
| 5434 | spte = get_written_sptes(sp, gpa, &npte); |
| 5435 | if (!spte) |
| 5436 | continue; |
| 5437 | |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 5438 | local_flush = true; |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 5439 | while (npte--) { |
Vitaly Kuznetsov | 36d9594d | 2018-10-08 21:28:10 +0200 | [diff] [blame] | 5440 | u32 base_role = vcpu->arch.mmu->mmu_role.base.word; |
| 5441 | |
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 5442 | entry = *spte; |
Ben Gardon | 2de4085 | 2020-09-23 15:14:06 -0700 | [diff] [blame] | 5443 | mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL); |
Xiao Guangrong | fa1de2b | 2010-07-16 11:19:51 +0800 | [diff] [blame] | 5444 | if (gentry && |
Sean Christopherson | a102a67 | 2020-03-02 18:02:34 -0800 | [diff] [blame] | 5445 | !((sp->role.word ^ base_role) & ~role_ign.word) && |
| 5446 | rmap_can_add(vcpu)) |
Xiao Guangrong | 7c56252 | 2011-03-28 10:29:27 +0800 | [diff] [blame] | 5447 | mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); |
Gleb Natapov | 9bb4f6b | 2013-01-30 16:45:01 +0200 | [diff] [blame] | 5448 | if (need_remote_flush(entry, *spte)) |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 5449 | remote_flush = true; |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 5450 | ++spte; |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5451 | } |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5452 | } |
Paolo Bonzini | b8c67b7 | 2016-02-24 11:21:55 +0100 | [diff] [blame] | 5453 | kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); |
Xiao Guangrong | 0375f7f | 2011-11-28 20:41:00 +0800 | [diff] [blame] | 5454 | kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 5455 | spin_unlock(&vcpu->kvm->mmu_lock); |
Avi Kivity | da4a00f | 2007-01-05 16:36:44 -0800 | [diff] [blame] | 5456 | } |
| 5457 | |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5458 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) |
| 5459 | { |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 5460 | gpa_t gpa; |
| 5461 | int r; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5462 | |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5463 | if (vcpu->arch.mmu->direct_map) |
Avi Kivity | 60f2478 | 2009-08-27 13:37:06 +0300 | [diff] [blame] | 5464 | return 0; |
| 5465 | |
Gleb Natapov | 1871c60 | 2010-02-10 14:21:32 +0200 | [diff] [blame] | 5466 | gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 5467 | |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 5468 | r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); |
Xiao Guangrong | 1cb3f3a | 2011-09-22 17:02:48 +0800 | [diff] [blame] | 5469 | |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 5470 | return r; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5471 | } |
Avi Kivity | 577bdc4 | 2008-07-19 08:57:05 +0300 | [diff] [blame] | 5472 | EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 5473 | |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5474 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, |
Andre Przywara | dc25e89 | 2010-12-21 11:12:07 +0100 | [diff] [blame] | 5475 | void *insn, int insn_len) |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5476 | { |
Sean Christopherson | 92daa48 | 2020-02-18 15:03:08 -0800 | [diff] [blame] | 5477 | int r, emulation_type = EMULTYPE_PF; |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5478 | bool direct = vcpu->arch.mmu->direct_map; |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5479 | |
Sean Christopherson | 6948199 | 2019-12-06 15:57:29 -0800 | [diff] [blame] | 5480 | if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa))) |
Sean Christopherson | ddce620 | 2019-12-06 15:57:27 -0800 | [diff] [blame] | 5481 | return RET_PF_RETRY; |
| 5482 | |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 5483 | r = RET_PF_INVALID; |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5484 | if (unlikely(error_code & PFERR_RSVD_MASK)) { |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5485 | r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct); |
Sean Christopherson | 472faff | 2018-08-23 13:56:50 -0700 | [diff] [blame] | 5486 | if (r == RET_PF_EMULATE) |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5487 | goto emulate; |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5488 | } |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5489 | |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 5490 | if (r == RET_PF_INVALID) { |
Sean Christopherson | 7a02674 | 2020-02-06 14:14:34 -0800 | [diff] [blame] | 5491 | r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, |
| 5492 | lower_32_bits(error_code), false); |
Sean Christopherson | 7b367bc | 2020-09-23 15:04:22 -0700 | [diff] [blame] | 5493 | if (WARN_ON_ONCE(r == RET_PF_INVALID)) |
| 5494 | return -EIO; |
Paolo Bonzini | 9b8ebbd | 2017-08-17 15:03:32 +0200 | [diff] [blame] | 5495 | } |
| 5496 | |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5497 | if (r < 0) |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5498 | return r; |
Sean Christopherson | 83a2ba4 | 2020-09-23 15:04:23 -0700 | [diff] [blame] | 5499 | if (r != RET_PF_EMULATE) |
| 5500 | return 1; |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5501 | |
Tom Lendacky | 1472775 | 2016-11-23 12:01:38 -0500 | [diff] [blame] | 5502 | /* |
| 5503 | * Before emulating the instruction, check if the error code |
| 5504 | * was due to a RO violation while translating the guest page. |
| 5505 | * This can occur when using nested virtualization with nested |
| 5506 | * paging in both guests. If true, we simply unprotect the page |
| 5507 | * and resume the guest. |
Tom Lendacky | 1472775 | 2016-11-23 12:01:38 -0500 | [diff] [blame] | 5508 | */ |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5509 | if (vcpu->arch.mmu->direct_map && |
Paolo Bonzini | eebed24 | 2016-11-28 14:39:58 +0100 | [diff] [blame] | 5510 | (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) { |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5511 | kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa)); |
Tom Lendacky | 1472775 | 2016-11-23 12:01:38 -0500 | [diff] [blame] | 5512 | return 1; |
| 5513 | } |
| 5514 | |
Sean Christopherson | 472faff | 2018-08-23 13:56:50 -0700 | [diff] [blame] | 5515 | /* |
| 5516 | * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still |
| 5517 | * optimistically try to just unprotect the page and let the processor |
| 5518 | * re-execute the instruction that caused the page fault. Do not allow |
| 5519 | * retrying MMIO emulation, as it's not only pointless but could also |
| 5520 | * cause us to enter an infinite loop because the processor will keep |
Sean Christopherson | 6c3dfeb | 2018-08-23 13:56:51 -0700 | [diff] [blame] | 5521 | * faulting on the non-existent MMIO address. Retrying an instruction |
| 5522 | * from a nested guest is also pointless and dangerous as we are only |
| 5523 | * explicitly shadowing L1's page tables, i.e. unprotecting something |
| 5524 | * for L1 isn't going to magically fix whatever issue caused L2 to fail. |
Sean Christopherson | 472faff | 2018-08-23 13:56:50 -0700 | [diff] [blame] | 5525 | */ |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5526 | if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu)) |
Sean Christopherson | 92daa48 | 2020-02-18 15:03:08 -0800 | [diff] [blame] | 5527 | emulation_type |= EMULTYPE_ALLOW_RETRY_PF; |
Takuya Yoshikawa | e9ee956 | 2016-02-22 17:23:41 +0900 | [diff] [blame] | 5528 | emulate: |
Sean Christopherson | 736c291 | 2019-12-06 15:57:14 -0800 | [diff] [blame] | 5529 | return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn, |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 5530 | insn_len); |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 5531 | } |
| 5532 | EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); |
| 5533 | |
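| | /* |
| | * Invalidate 'gva' in the given MMU. With root_hpa == INVALID_PAGE the |
| | * invalidation hits the current root and all cached prev_roots; |
| | * otherwise only the specified root is touched. |
| | */ |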
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5534 | void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
| 5535 | gva_t gva, hpa_t root_hpa) |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 5536 | { |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5537 | int i; |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 5538 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5539 | /* It's actually a GPA for vcpu->arch.guest_mmu. */ |
| 5540 | if (mmu != &vcpu->arch.guest_mmu) { |
| 5541 | /* INVLPG on a non-canonical address is a NOP according to the SDM. */ |
| 5542 | if (is_noncanonical_address(gva, vcpu)) |
| 5543 | return; |
| 5544 | |
| 5545 | kvm_x86_ops.tlb_flush_gva(vcpu, gva); |
| 5546 | } |
| 5547 | |
| 5548 | if (!mmu->invlpg) |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5549 | return; |
| 5550 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5551 | if (root_hpa == INVALID_PAGE) { |
| 5552 | mmu->invlpg(vcpu, gva, mmu->root_hpa); |
Junaid Shahid | 956bf35 | 2018-06-27 14:59:18 -0700 | [diff] [blame] | 5553 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5554 | /* |
| 5555 | * INVLPG is required to invalidate any global mappings for the VA, |
| 5556 | * irrespective of PCID. Since it would take roughly the same amount |
| 5557 | * of work to determine whether any of the prev_root mappings of the VA |
| 5558 | * is marked global as it would to just sync it blindly, we might as |
| 5559 | * well always sync it. |
| 5560 | * |
| 5561 | * Mappings not reachable via the current cr3 or the prev_roots will be |
| 5562 | * synced when switching to that cr3, so nothing needs to be done here |
| 5563 | * for them. |
| 5564 | */ |
| 5565 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 5566 | if (VALID_PAGE(mmu->prev_roots[i].hpa)) |
| 5567 | mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); |
| 5568 | } else { |
| 5569 | mmu->invlpg(vcpu, gva, root_hpa); |
| 5570 | } |
| 5571 | } |
| 5572 | EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva); |
Junaid Shahid | 956bf35 | 2018-06-27 14:59:18 -0700 | [diff] [blame] | 5573 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5574 | void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) |
| 5575 | { |
| 5576 | kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE); |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 5577 | ++vcpu->stat.invlpg; |
| 5578 | } |
| 5579 | EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); |
| 5580 | |
Paolo Bonzini | 5efac07 | 2020-03-23 20:42:57 -0400 | [diff] [blame] | 5581 | |
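| | /* |
| | * Handle INVPCID's single-address type: invalidate 'gva' in every root, |
| | * current or cached, whose PCID matches, and flush the hardware TLB |
| | * entry if any root was hit. |
| | */ |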
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5582 | void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) |
| 5583 | { |
Vitaly Kuznetsov | 44dd3ff | 2018-10-08 21:28:05 +0200 | [diff] [blame] | 5584 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5585 | bool tlb_flush = false; |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5586 | uint i; |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5587 | |
| 5588 | if (pcid == kvm_get_active_pcid(vcpu)) { |
Junaid Shahid | 7eb77e9 | 2018-06-27 14:59:16 -0700 | [diff] [blame] | 5589 | mmu->invlpg(vcpu, gva, mmu->root_hpa); |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5590 | tlb_flush = true; |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5591 | } |
| 5592 | |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5593 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { |
| 5594 | if (VALID_PAGE(mmu->prev_roots[i].hpa) && |
Sean Christopherson | be01e8e | 2020-03-20 14:28:32 -0700 | [diff] [blame] | 5595 | pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) { |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5596 | mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); |
| 5597 | tlb_flush = true; |
| 5598 | } |
Junaid Shahid | 956bf35 | 2018-06-27 14:59:18 -0700 | [diff] [blame] | 5599 | } |
Junaid Shahid | ade61e2 | 2018-06-27 14:59:15 -0700 | [diff] [blame] | 5600 | |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5601 | if (tlb_flush) |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 5602 | kvm_x86_ops.tlb_flush_gva(vcpu, gva); |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5603 | |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5604 | ++vcpu->stat.invlpg; |
| 5605 | |
| 5606 | /* |
Junaid Shahid | b94742c | 2018-06-27 14:59:20 -0700 | [diff] [blame] | 5607 | * Mappings not reachable via the current cr3 or the prev_roots will be |
| 5608 | * synced when switching to that cr3, so nothing needs to be done here |
| 5609 | * for them. |
Junaid Shahid | eb4b248 | 2018-06-27 14:59:14 -0700 | [diff] [blame] | 5610 | */ |
| 5611 | } |
| 5612 | EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva); |
| 5613 | |
Sean Christopherson | 8301305 | 2020-07-15 20:41:22 -0700 | [diff] [blame] | 5614 | void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level, |
| 5615 | int tdp_huge_page_level) |
Joerg Roedel | 1855267 | 2008-02-07 13:47:41 +0100 | [diff] [blame] | 5616 | { |
Sean Christopherson | bde7723 | 2020-03-02 15:57:02 -0800 | [diff] [blame] | 5617 | tdp_enabled = enable_tdp; |
Sean Christopherson | 8301305 | 2020-07-15 20:41:22 -0700 | [diff] [blame] | 5618 | max_tdp_level = tdp_max_root_level; |
Sean Christopherson | 703c335 | 2020-03-02 15:57:03 -0800 | [diff] [blame] | 5619 | |
| 5620 | /* |
Sean Christopherson | 1d92d2e | 2020-07-15 20:41:21 -0700 | [diff] [blame] | 5621 | * max_huge_page_level reflects KVM's MMU capabilities irrespective |
Sean Christopherson | 703c335 | 2020-03-02 15:57:03 -0800 | [diff] [blame] | 5622 | * of kernel support, e.g. KVM may be capable of using 1GB pages when |
| 5623 | * the kernel is not. But, KVM never creates a page size greater than |
| 5624 | * what is used by the kernel for any given HVA, i.e. the kernel's |
| 5625 | * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust(). |
| 5626 | */ |
| 5627 | if (tdp_enabled) |
Sean Christopherson | 1d92d2e | 2020-07-15 20:41:21 -0700 | [diff] [blame] | 5628 | max_huge_page_level = tdp_huge_page_level; |
Sean Christopherson | 703c335 | 2020-03-02 15:57:03 -0800 | [diff] [blame] | 5629 | else if (boot_cpu_has(X86_FEATURE_GBPAGES)) |
Sean Christopherson | 1d92d2e | 2020-07-15 20:41:21 -0700 | [diff] [blame] | 5630 | max_huge_page_level = PG_LEVEL_1G; |
Sean Christopherson | 703c335 | 2020-03-02 15:57:03 -0800 | [diff] [blame] | 5631 | else |
Sean Christopherson | 1d92d2e | 2020-07-15 20:41:21 -0700 | [diff] [blame] | 5632 | max_huge_page_level = PG_LEVEL_2M; |
Joerg Roedel | 1855267 | 2008-02-07 13:47:41 +0100 | [diff] [blame] | 5633 | } |
Sean Christopherson | bde7723 | 2020-03-02 15:57:02 -0800 | [diff] [blame] | 5634 | EXPORT_SYMBOL_GPL(kvm_configure_mmu); |
Xiao Guangrong | 13d268c | 2016-02-24 17:51:16 +0800 | [diff] [blame] | 5635 | |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5636 | /* The return value indicates if tlb flush on all vcpus is needed. */ |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5637 | typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5638 | |
| 5639 | /* The caller should hold mmu-lock before calling this function. */ |
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5640 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5641 | slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5642 | slot_level_handler fn, int start_level, int end_level, |
| 5643 | gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) |
| 5644 | { |
| 5645 | struct slot_rmap_walk_iterator iterator; |
| 5646 | bool flush = false; |
| 5647 | |
| 5648 | for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn, |
| 5649 | end_gfn, &iterator) { |
| 5650 | if (iterator.rmap) |
| 5651 | flush |= fn(kvm, iterator.rmap); |
| 5652 | |
| 5653 | if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { |
| 5654 | if (flush && lock_flush_tlb) { |
Ben Gardon | f285c63 | 2019-03-12 11:45:59 -0700 | [diff] [blame] | 5655 | kvm_flush_remote_tlbs_with_address(kvm, |
| 5656 | start_gfn, |
| 5657 | iterator.gfn - start_gfn + 1); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5658 | flush = false; |
| 5659 | } |
| 5660 | cond_resched_lock(&kvm->mmu_lock); |
| 5661 | } |
| 5662 | } |
| 5663 | |
| 5664 | if (flush && lock_flush_tlb) { |
Ben Gardon | f285c63 | 2019-03-12 11:45:59 -0700 | [diff] [blame] | 5665 | kvm_flush_remote_tlbs_with_address(kvm, start_gfn, |
| 5666 | end_gfn - start_gfn + 1); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5667 | flush = false; |
| 5668 | } |
| 5669 | |
| 5670 | return flush; |
| 5671 | } |
| 5672 | |
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5673 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5674 | slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5675 | slot_level_handler fn, int start_level, int end_level, |
| 5676 | bool lock_flush_tlb) |
| 5677 | { |
| 5678 | return slot_handle_level_range(kvm, memslot, fn, start_level, |
| 5679 | end_level, memslot->base_gfn, |
| 5680 | memslot->base_gfn + memslot->npages - 1, |
| 5681 | lock_flush_tlb); |
| 5682 | } |
| 5683 | |
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5684 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5685 | slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5686 | slot_level_handler fn, bool lock_flush_tlb) |
| 5687 | { |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 5688 | return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K, |
Sean Christopherson | e662ec3 | 2020-04-27 17:54:21 -0700 | [diff] [blame] | 5689 | KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5690 | } |
| 5691 | |
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5692 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5693 | slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5694 | slot_level_handler fn, bool lock_flush_tlb) |
| 5695 | { |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 5696 | return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K + 1, |
Sean Christopherson | e662ec3 | 2020-04-27 17:54:21 -0700 | [diff] [blame] | 5697 | KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5698 | } |
| 5699 | |
David Woodhouse | 928a4c3 | 2018-02-10 23:39:24 +0000 | [diff] [blame] | 5700 | static __always_inline bool |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5701 | slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, |
| 5702 | slot_level_handler fn, bool lock_flush_tlb) |
| 5703 | { |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 5704 | return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K, |
| 5705 | PG_LEVEL_4K, lock_flush_tlb); |
Xiao Guangrong | 1bad2b2 | 2015-05-13 14:42:23 +0800 | [diff] [blame] | 5706 | } |
| 5707 | |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5708 | static void free_mmu_pages(struct kvm_mmu *mmu) |
Takuya Yoshikawa | b99db1d | 2013-01-08 19:44:48 +0900 | [diff] [blame] | 5709 | { |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5710 | free_page((unsigned long)mmu->pae_root); |
| 5711 | free_page((unsigned long)mmu->lm_root); |
Takuya Yoshikawa | 6b81b05 | 2013-01-08 19:47:33 +0900 | [diff] [blame] | 5712 | } |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5713 | |
Sean Christopherson | 04d28e3 | 2020-09-23 09:33:14 -0700 | [diff] [blame] | 5714 | static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) |
Avi Kivity | 8234b22 | 2010-12-27 12:08:45 +0200 | [diff] [blame] | 5715 | { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5716 | struct page *page; |
Takuya Yoshikawa | b99db1d | 2013-01-08 19:44:48 +0900 | [diff] [blame] | 5717 | int i; |
Takuya Yoshikawa | 9d1beef | 2013-01-08 19:46:48 +0900 | [diff] [blame] | 5718 | |
Sean Christopherson | 04d28e3 | 2020-09-23 09:33:14 -0700 | [diff] [blame] | 5719 | mmu->root_hpa = INVALID_PAGE; |
| 5720 | mmu->root_pgd = 0; |
| 5721 | mmu->translate_gpa = translate_gpa; |
| 5722 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
| 5723 | mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; |
| 5724 | |
Sean Christopherson | b6b80c7 | 2019-06-13 10:22:23 -0700 | [diff] [blame] | 5725 | /* |
| 5726 | * When using PAE paging, the four PDPTEs are treated as 'root' pages, |
| 5727 | * while the PDP table is a per-vCPU construct that's allocated at MMU |
| 5728 | * creation. When emulating 32-bit mode, cr3 is only 32 bits even on |
| 5729 | * x86_64. Therefore we need to allocate the PDP table in the first |
| 5730 | * 4GB of memory, which happens to fit the DMA32 zone. Except for |
| 5731 | * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can |
| 5732 | * skip allocating the PDP table. |
| 5733 | */ |
Sean Christopherson | d468d94 | 2020-07-15 20:41:20 -0700 | [diff] [blame] | 5734 | if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL) |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5735 | return 0; |
| 5736 | |
Ben Gardon | 254272c | 2019-02-11 11:02:50 -0800 | [diff] [blame] | 5737 | page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32); |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5738 | if (!page) |
| 5739 | return -ENOMEM; |
| 5740 | |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5741 | mmu->pae_root = page_address(page); |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5742 | for (i = 0; i < 4; ++i) |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5743 | mmu->pae_root[i] = INVALID_PAGE; |
Xiao Guangrong | 198c74f | 2014-04-17 17:06:16 +0800 | [diff] [blame] | 5744 | |
| 5745 | return 0; |
| 5746 | } |
| 5747 | |
Kai Huang | d91ffee | 2015-01-12 15:28:54 +0800 | [diff] [blame] | 5748 | int kvm_mmu_create(struct kvm_vcpu *vcpu) |
| 5749 | { |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5750 | int ret; |
Avi Kivity | 37a7d8b | 2007-01-05 16:36:56 -0800 | [diff] [blame] | 5751 | |
Sean Christopherson | 5962bfb | 2020-07-02 19:35:25 -0700 | [diff] [blame] | 5752 | vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache; |
Sean Christopherson | 5f6078f | 2020-07-02 19:35:34 -0700 | [diff] [blame] | 5753 | vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO; |
| 5754 | |
Sean Christopherson | 5962bfb | 2020-07-02 19:35:25 -0700 | [diff] [blame] | 5755 | vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache; |
Sean Christopherson | 5f6078f | 2020-07-02 19:35:34 -0700 | [diff] [blame] | 5756 | vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO; |
Sean Christopherson | 5962bfb | 2020-07-02 19:35:25 -0700 | [diff] [blame] | 5757 | |
Sean Christopherson | 9688088 | 2020-07-02 19:35:35 -0700 | [diff] [blame] | 5758 | vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO; |
| 5759 | |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5760 | vcpu->arch.mmu = &vcpu->arch.root_mmu; |
| 5761 | vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; |
| 5762 | |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5763 | vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5764 | |
Sean Christopherson | 04d28e3 | 2020-09-23 09:33:14 -0700 | [diff] [blame] | 5765 | ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu); |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5766 | if (ret) |
| 5767 | return ret; |
| 5768 | |
Sean Christopherson | 04d28e3 | 2020-09-23 09:33:14 -0700 | [diff] [blame] | 5769 | ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu); |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 5770 | if (ret) |
| 5771 | goto fail_allocate_root; |
| 5772 | |
| 5773 | return ret; |
| 5774 | fail_allocate_root: |
| 5775 | free_mmu_pages(&vcpu->arch.guest_mmu); |
| 5776 | return ret; |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5777 | } |
| 5778 | |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5779 | #define BATCH_ZAP_PAGES 10 |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5780 | static void kvm_zap_obsolete_pages(struct kvm *kvm) |
| 5781 | { |
| 5782 | struct kvm_mmu_page *sp, *node; |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5783 | int nr_zapped, batch = 0; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5784 | |
| 5785 | restart: |
| 5786 | list_for_each_entry_safe_reverse(sp, node, |
| 5787 | &kvm->arch.active_mmu_pages, link) { |
| 5788 | /* |
| 5789 | * No obsolete valid page exists before a newly created page |
| 5790 | * since active_mmu_pages is a FIFO list. |
| 5791 | */ |
| 5792 | if (!is_obsolete_sp(kvm, sp)) |
| 5793 | break; |
| 5794 | |
| 5795 | /* |
Sean Christopherson | f95eec9 | 2020-06-23 12:35:39 -0700 | [diff] [blame] | 5796 | * Invalid pages should never land back on the list of active |
| 5797 | * pages. Skip the bogus page, otherwise we'll get stuck in an |
| 5798 | * infinite loop if the page gets put back on the list (again). |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5799 | */ |
Sean Christopherson | f95eec9 | 2020-06-23 12:35:39 -0700 | [diff] [blame] | 5800 | if (WARN_ON(sp->role.invalid)) |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5801 | continue; |
| 5802 | |
Sean Christopherson | 4506ecf | 2019-09-12 19:46:08 -0700 | [diff] [blame] | 5803 | /* |
| 5804 | * No need to flush the TLB since we're only zapping shadow |
| 5805 | * pages with an obsolete generation number and all vCPUs have
| 5806 | * loaded a new root, i.e. the shadow pages being zapped cannot |
| 5807 | * be in active use by the guest. |
| 5808 | */ |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5809 | if (batch >= BATCH_ZAP_PAGES && |
Sean Christopherson | 4506ecf | 2019-09-12 19:46:08 -0700 | [diff] [blame] | 5810 | cond_resched_lock(&kvm->mmu_lock)) { |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5811 | batch = 0; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5812 | goto restart; |
| 5813 | } |
| 5814 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5815 | if (__kvm_mmu_prepare_zap_page(kvm, sp, |
| 5816 | &kvm->arch.zapped_obsolete_pages, &nr_zapped)) { |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5817 | batch += nr_zapped; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5818 | goto restart; |
Sean Christopherson | fbb158c | 2019-09-12 19:46:07 -0700 | [diff] [blame] | 5819 | } |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5820 | } |
| 5821 | |
Sean Christopherson | 4506ecf | 2019-09-12 19:46:08 -0700 | [diff] [blame] | 5822 | /* |
| 5823 | * Trigger a remote TLB flush before freeing the page tables to ensure |
| 5824 | * KVM is not in the middle of a lockless shadow page table walk, which |
| 5825 | * may reference the pages. |
| 5826 | */ |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5827 | kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5828 | } |
| 5829 | |
| 5830 | /* |
| 5831 | * Fast-invalidate all shadow pages, using a lock-break technique
| 5832 | * to zap obsolete pages.
| 5833 | *
| 5834 | * This is required when a memslot is being deleted or the VM is
| 5835 | * being destroyed; in those cases the KVM MMU must not use any
| 5836 | * resource of the slot being deleted (or of any slot, for VM
| 5837 | * destruction) after this function returns.
| 5838 | */ |
| 5839 | static void kvm_mmu_zap_all_fast(struct kvm *kvm) |
| 5840 | { |
Sean Christopherson | ca333ad | 2019-09-12 19:46:11 -0700 | [diff] [blame] | 5841 | lockdep_assert_held(&kvm->slots_lock); |
| 5842 | |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5843 | spin_lock(&kvm->mmu_lock); |
Sean Christopherson | 14a3c4f | 2019-09-12 19:46:06 -0700 | [diff] [blame] | 5844 | trace_kvm_mmu_zap_all_fast(kvm); |
Sean Christopherson | ca333ad | 2019-09-12 19:46:11 -0700 | [diff] [blame] | 5845 | |
| 5846 | /* |
| 5847 | * Toggle mmu_valid_gen between '0' and '1'. Because slots_lock is |
| 5848 | * held for the entire duration of zapping obsolete pages, it's |
| 5849 | * impossible for there to be multiple invalid generations associated |
| 5850 | * with *valid* shadow pages at any given time, i.e. there is exactly |
| 5851 | * one valid generation and (at most) one invalid generation. |
| 5852 | */ |
| 5853 | kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1; |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5854 | |
Sean Christopherson | 4506ecf | 2019-09-12 19:46:08 -0700 | [diff] [blame] | 5855 | /* |
| 5856 | * Notify all vCPUs to reload their shadow page tables and flush the
| 5857 | * TLB. All vCPUs will then switch to a new shadow page table with
| 5858 | * the new mmu_valid_gen.
| 5859 | *
| 5860 | * Note: this must be done under the protection of mmu_lock;
| 5861 | * otherwise a vCPU could purge a shadow page but miss the TLB flush.
| 5862 | */ |
| 5863 | kvm_reload_remote_mmus(kvm); |
| 5864 | |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5865 | kvm_zap_obsolete_pages(kvm); |
| 5866 | spin_unlock(&kvm->mmu_lock); |
| 5867 | } |
| 5868 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 5869 | static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) |
| 5870 | { |
| 5871 | return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); |
| 5872 | } |
| 5873 | |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5874 | static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, |
| 5875 | struct kvm_memory_slot *slot, |
| 5876 | struct kvm_page_track_notifier_node *node) |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5877 | { |
Sean Christopherson | 002c5f7 | 2019-09-12 19:46:02 -0700 | [diff] [blame] | 5878 | kvm_mmu_zap_all_fast(kvm); |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5879 | } |
| 5880 | |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5881 | void kvm_mmu_init_vm(struct kvm *kvm) |
| 5882 | { |
| 5883 | struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; |
| 5884 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5885 | node->track_write = kvm_mmu_pte_write; |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5886 | node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot; |
| 5887 | kvm_page_track_register_notifier(kvm, node); |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5888 | } |
| 5889 | |
| 5890 | void kvm_mmu_uninit_vm(struct kvm *kvm) |
| 5891 | { |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5892 | struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5893 | |
| 5894 | kvm_page_track_unregister_notifier(kvm, node); |
| 5895 | } |
| 5896 | |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5897 | void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5898 | { |
| 5899 | struct kvm_memslots *slots; |
| 5900 | struct kvm_memory_slot *memslot; |
| 5901 | int i; |
| 5902 | |
| 5903 | spin_lock(&kvm->mmu_lock); |
| 5904 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
| 5905 | slots = __kvm_memslots(kvm, i); |
| 5906 | kvm_for_each_memslot(memslot, slots) { |
| 5907 | gfn_t start, end; |
| 5908 | |
| 5909 | start = max(gfn_start, memslot->base_gfn); |
| 5910 | end = min(gfn_end, memslot->base_gfn + memslot->npages); |
| 5911 | if (start >= end) |
| 5912 | continue; |
| 5913 | |
Ben Gardon | 92da008 | 2019-03-12 11:45:58 -0700 | [diff] [blame] | 5914 | slot_handle_level_range(kvm, memslot, kvm_zap_rmapp, |
Sean Christopherson | 3bae045 | 2020-04-27 17:54:22 -0700 | [diff] [blame] | 5915 | PG_LEVEL_4K, |
Sean Christopherson | e662ec3 | 2020-04-27 17:54:21 -0700 | [diff] [blame] | 5916 | KVM_MAX_HUGEPAGE_LEVEL, |
Ben Gardon | 92da008 | 2019-03-12 11:45:58 -0700 | [diff] [blame] | 5917 | start, end - 1, true); |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 5918 | } |
Xiao Guangrong | efdfe53 | 2015-05-13 14:42:27 +0800 | [diff] [blame] | 5919 | } |
| 5920 | |
| 5921 | spin_unlock(&kvm->mmu_lock); |
| 5922 | } |
| 5923 | |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5924 | static bool slot_rmap_write_protect(struct kvm *kvm, |
| 5925 | struct kvm_rmap_head *rmap_head) |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5926 | { |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5927 | return __rmap_write_protect(kvm, rmap_head, false); |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5928 | } |
| 5929 | |
Dor Laor | e0fa826 | 2007-03-30 13:06:33 +0300 | [diff] [blame] | 5930 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 5931 | struct kvm_memory_slot *memslot, |
| 5932 | int start_level) |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 5933 | { |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 5934 | bool flush; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 5935 | |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 5936 | spin_lock(&kvm->mmu_lock); |
Jay Zhou | 3c9bd40 | 2020-02-27 09:32:27 +0800 | [diff] [blame] | 5937 | flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect, |
Sean Christopherson | e662ec3 | 2020-04-27 17:54:21 -0700 | [diff] [blame] | 5938 | start_level, KVM_MAX_HUGEPAGE_LEVEL, false); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5939 | spin_unlock(&kvm->mmu_lock); |
| 5940 | |
| 5941 | /* |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5942 | * We can flush all the TLBs out of the mmu lock without risking TLB
| 5943 | * corruption because we only change SPTEs from writable to
Xiao Guangrong | e7d11c7 | 2013-05-31 08:36:27 +0800 | [diff] [blame] | 5944 | * read-only, so the only case we need to care about is an SPTE
| 5945 | * changing from present to present (changing an SPTE from present
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5946 | * to non-present flushes all the TLBs immediately). In other
| 5947 | * words, the only case we care about is mmu_spte_update(), which
Wei Yang | bdd303c | 2018-11-05 14:45:03 +0800 | [diff] [blame] | 5948 | * checks SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 5949 | * instead of PT_WRITABLE_MASK and therefore no longer depends
| 5950 | * on PT_WRITABLE_MASK.
| 5951 | */ |
| 5952 | if (flush) |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 5953 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5954 | } |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5955 | |
| 5956 | static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5957 | struct kvm_rmap_head *rmap_head) |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5958 | { |
| 5959 | u64 *sptep; |
| 5960 | struct rmap_iterator iter; |
| 5961 | int need_tlb_flush = 0; |
Dan Williams | ba049e9 | 2016-01-15 16:56:11 -0800 | [diff] [blame] | 5962 | kvm_pfn_t pfn; |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5963 | struct kvm_mmu_page *sp; |
| 5964 | |
| 5965 | restart: |
Takuya Yoshikawa | 018aabb5 | 2015-11-20 17:41:28 +0900 | [diff] [blame] | 5966 | for_each_rmap_spte(rmap_head, &iter, sptep) { |
Sean Christopherson | 5735468 | 2020-06-22 13:20:33 -0700 | [diff] [blame] | 5967 | sp = sptep_to_sp(sptep); |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5968 | pfn = spte_to_pfn(*sptep); |
| 5969 | |
| 5970 | /* |
Xiao Guangrong | decf633 | 2015-04-14 12:04:10 +0800 | [diff] [blame] | 5971 | * We cannot create huge page mappings for indirect shadow pages,
| 5972 | * which are found on the last rmap level (level = 1) when not using
| 5973 | * TDP; such shadow pages are synced with the guest page table, and
| 5974 | * the guest page table uses 4K mappings if the indirect sp has
| 5975 | * level = 1.
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5976 | */ |
Sean Christopherson | a78986a | 2019-11-11 14:12:27 -0800 | [diff] [blame] | 5977 | if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && |
Sean Christopherson | e851265 | 2020-01-08 12:24:48 -0800 | [diff] [blame] | 5978 | (kvm_is_zone_device_pfn(pfn) || |
| 5979 | PageCompound(pfn_to_page(pfn)))) { |
Wei Yang | e791238 | 2018-10-04 10:04:23 +0800 | [diff] [blame] | 5980 | pte_list_remove(rmap_head, sptep); |
Lan Tianyu | 40ef75a | 2018-12-06 21:21:08 +0800 | [diff] [blame] | 5981 | |
| 5982 | if (kvm_available_flush_tlb_with_range()) |
| 5983 | kvm_flush_remote_tlbs_with_address(kvm, sp->gfn, |
| 5984 | KVM_PAGES_PER_HPAGE(sp->role.level)); |
| 5985 | else |
| 5986 | need_tlb_flush = 1; |
| 5987 | |
Xiao Guangrong | 0d53679 | 2015-05-13 14:42:20 +0800 | [diff] [blame] | 5988 | goto restart; |
| 5989 | } |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5990 | } |
| 5991 | |
| 5992 | return need_tlb_flush; |
| 5993 | } |
| 5994 | |
| 5995 | void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, |
Paolo Bonzini | f36f3f2 | 2015-05-18 13:20:23 +0200 | [diff] [blame] | 5996 | const struct kvm_memory_slot *memslot) |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5997 | { |
Paolo Bonzini | f36f3f2 | 2015-05-18 13:20:23 +0200 | [diff] [blame] | 5998 | /* FIXME: const-ify all uses of struct kvm_memory_slot. */ |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 5999 | spin_lock(&kvm->mmu_lock); |
Paolo Bonzini | f36f3f2 | 2015-05-18 13:20:23 +0200 | [diff] [blame] | 6000 | slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot, |
| 6001 | kvm_mmu_zap_collapsible_spte, true); |
Wanpeng Li | 3ea3b7f | 2015-04-03 15:40:25 +0800 | [diff] [blame] | 6002 | spin_unlock(&kvm->mmu_lock); |
| 6003 | } |
| 6004 | |
Sean Christopherson | b3594ff | 2020-02-18 13:07:34 -0800 | [diff] [blame] | 6005 | void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, |
| 6006 | struct kvm_memory_slot *memslot) |
| 6007 | { |
| 6008 | /* |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 6009 | * All current use cases for flushing the TLBs for a specific memslot |
| 6010 | * are related to dirty logging, and do the TLB flush out of mmu_lock. |
| 6011 | * The interaction between the various operations on the memslot must be
| 6012 | * serialized by slots_lock to ensure the TLB flush from one operation
| 6013 | * is observed by any other operation on the same memslot.
Sean Christopherson | b3594ff | 2020-02-18 13:07:34 -0800 | [diff] [blame] | 6014 | */ |
| 6015 | lockdep_assert_held(&kvm->slots_lock); |
Sean Christopherson | cec3764 | 2020-02-18 13:07:35 -0800 | [diff] [blame] | 6016 | kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, |
| 6017 | memslot->npages); |
Sean Christopherson | b3594ff | 2020-02-18 13:07:34 -0800 | [diff] [blame] | 6018 | } |
| 6019 | |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6020 | void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, |
| 6021 | struct kvm_memory_slot *memslot) |
| 6022 | { |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 6023 | bool flush; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6024 | |
| 6025 | spin_lock(&kvm->mmu_lock); |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 6026 | flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6027 | spin_unlock(&kvm->mmu_lock); |
| 6028 | |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6029 | /* |
| 6030 | * It's also safe to flush the TLBs out of mmu_lock here because this
| 6031 | * function is currently only used for dirty logging, in which case
| 6032 | * flushing the TLB out of mmu_lock also guarantees that no dirty pages
| 6033 | * will be lost in the dirty_bitmap.
| 6034 | */ |
| 6035 | if (flush) |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 6036 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6037 | } |
| 6038 | EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty); |
| 6039 | |
| 6040 | void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, |
| 6041 | struct kvm_memory_slot *memslot) |
| 6042 | { |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 6043 | bool flush; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6044 | |
| 6045 | spin_lock(&kvm->mmu_lock); |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 6046 | flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect, |
| 6047 | false); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6048 | spin_unlock(&kvm->mmu_lock); |
| 6049 | |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6050 | if (flush) |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 6051 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6052 | } |
| 6053 | EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access); |
| 6054 | |
| 6055 | void kvm_mmu_slot_set_dirty(struct kvm *kvm, |
| 6056 | struct kvm_memory_slot *memslot) |
| 6057 | { |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 6058 | bool flush; |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6059 | |
| 6060 | spin_lock(&kvm->mmu_lock); |
Xiao Guangrong | d77aa73 | 2015-05-13 14:42:24 +0800 | [diff] [blame] | 6061 | flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6062 | spin_unlock(&kvm->mmu_lock); |
| 6063 | |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6064 | if (flush) |
Sean Christopherson | 7f42aa7 | 2020-02-18 13:07:36 -0800 | [diff] [blame] | 6065 | kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); |
Kai Huang | f4b4b18 | 2015-01-28 10:54:24 +0800 | [diff] [blame] | 6066 | } |
| 6067 | EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty); |
| 6068 | |
Sean Christopherson | 92f58b5 | 2019-09-12 19:46:04 -0700 | [diff] [blame] | 6069 | void kvm_mmu_zap_all(struct kvm *kvm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 6070 | { |
| 6071 | struct kvm_mmu_page *sp, *node; |
Sean Christopherson | 7390de1 | 2019-02-05 13:01:31 -0800 | [diff] [blame] | 6072 | LIST_HEAD(invalid_list); |
Sean Christopherson | 83cdb56 | 2019-02-05 13:01:35 -0800 | [diff] [blame] | 6073 | int ign; |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 6074 | |
Sean Christopherson | 7390de1 | 2019-02-05 13:01:31 -0800 | [diff] [blame] | 6075 | spin_lock(&kvm->mmu_lock); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 6076 | restart: |
Sean Christopherson | 8a674ad | 2019-02-05 13:01:32 -0800 | [diff] [blame] | 6077 | list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { |
Sean Christopherson | f95eec9 | 2020-06-23 12:35:39 -0700 | [diff] [blame] | 6078 | if (WARN_ON(sp->role.invalid)) |
Sean Christopherson | 8a674ad | 2019-02-05 13:01:32 -0800 | [diff] [blame] | 6079 | continue; |
Sean Christopherson | 92f58b5 | 2019-09-12 19:46:04 -0700 | [diff] [blame] | 6080 | if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 6081 | goto restart; |
Sean Christopherson | 24efe61 | 2019-02-05 13:01:36 -0800 | [diff] [blame] | 6082 | if (cond_resched_lock(&kvm->mmu_lock)) |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 6083 | goto restart; |
| 6084 | } |
| 6085 | |
Sean Christopherson | 4771450 | 2019-02-05 13:01:23 -0800 | [diff] [blame] | 6086 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Xiao Guangrong | 5304b8d | 2013-05-31 08:36:22 +0800 | [diff] [blame] | 6087 | spin_unlock(&kvm->mmu_lock); |
| 6088 | } |
| 6089 | |
Sean Christopherson | 1524825 | 2019-02-05 12:54:17 -0800 | [diff] [blame] | 6090 | void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 6091 | { |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 6092 | WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 6093 | |
Sean Christopherson | 164bf7e | 2019-02-05 13:01:18 -0800 | [diff] [blame] | 6094 | gen &= MMIO_SPTE_GEN_MASK; |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 6095 | |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 6096 | /* |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 6097 | * Generation numbers are incremented in multiples of the number of |
| 6098 | * address spaces in order to provide unique generations across all |
| 6099 | * address spaces. Strip what is effectively the address space |
| 6100 | * modifier prior to checking for a wrap of the MMIO generation so |
| 6101 | * that a wrap in any address space is detected. |
| 6102 | */ |
| 6103 | gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1); |
| 6104 | |
| 6105 | /* |
| 6106 | * The very rare case: if the MMIO generation number has wrapped, |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 6107 | * zap all shadow pages. |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 6108 | */ |
Sean Christopherson | e1359e2 | 2019-02-05 13:01:12 -0800 | [diff] [blame] | 6109 | if (unlikely(gen == 0)) { |
Bandan Das | ae0f549 | 2016-11-15 01:36:18 -0500 | [diff] [blame] | 6110 | kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n"); |
Sean Christopherson | 92f58b5 | 2019-09-12 19:46:04 -0700 | [diff] [blame] | 6111 | kvm_mmu_zap_all_fast(kvm); |
Takuya Yoshikawa | 7a2e8aa | 2013-06-21 01:34:31 +0900 | [diff] [blame] | 6112 | } |
Xiao Guangrong | f8f5594 | 2013-06-07 16:51:26 +0800 | [diff] [blame] | 6113 | } |
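/*
 * Editor's illustrative note (not upstream code): a worked example of the
 * generation stripping above, assuming KVM_ADDRESS_SPACE_NUM == 2 (regular
 * plus SMM address space on x86). Generations then advance in steps of 2
 * and the low bit is effectively the address-space modifier:
 *
 *   gen &= MMIO_SPTE_GEN_MASK;     // e.g. 0x2a5
 *   gen &= ~((u64)2 - 1);          // strip bit 0 -> 0x2a4
 *
 * so 0x2a4 and 0x2a5 are treated as the same MMIO generation, and a wrap
 * to zero in either address space triggers the zap above.
 */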
| 6114 | |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6115 | static unsigned long |
| 6116 | mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6117 | { |
| 6118 | struct kvm *kvm; |
Ying Han | 1495f23 | 2011-05-24 17:12:27 -0700 | [diff] [blame] | 6119 | int nr_to_scan = sc->nr_to_scan; |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6120 | unsigned long freed = 0; |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6121 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 6122 | mutex_lock(&kvm_lock); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6123 | |
| 6124 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Jan Kiszka | 3d56cbd | 2011-12-02 18:35:24 +0100 | [diff] [blame] | 6125 | int idx; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 6126 | LIST_HEAD(invalid_list); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6127 | |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6128 | /* |
Takuya Yoshikawa | 35f2d16 | 2012-08-20 18:35:39 +0900 | [diff] [blame] | 6129 | * Never scan more than sc->nr_to_scan VM instances. |
| 6130 | * In practice this condition is never hit, since we do not try
| 6131 | * to shrink more than one VM and it is very unlikely to see
| 6132 | * !n_used_mmu_pages so many times.
| 6133 | */ |
| 6134 | if (!nr_to_scan--) |
| 6135 | break; |
| 6136 | /* |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6137 | * n_used_mmu_pages is accessed without holding kvm->mmu_lock |
| 6138 | * here. We may skip a VM instance erroneously, but we do not
| 6139 | * want to shrink a VM that has only just started to populate its
| 6140 | * MMU anyway.
| 6141 | */ |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 6142 | if (!kvm->arch.n_used_mmu_pages && |
| 6143 | !kvm_has_zapped_obsolete_pages(kvm)) |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6144 | continue; |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6145 | |
Marcelo Tosatti | f656ce0 | 2009-12-23 14:35:25 -0200 | [diff] [blame] | 6146 | idx = srcu_read_lock(&kvm->srcu); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6147 | spin_lock(&kvm->mmu_lock); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6148 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 6149 | if (kvm_has_zapped_obsolete_pages(kvm)) { |
| 6150 | kvm_mmu_commit_zap_page(kvm, |
| 6151 | &kvm->arch.zapped_obsolete_pages); |
| 6152 | goto unlock; |
| 6153 | } |
| 6154 | |
Sean Christopherson | ebdb292 | 2020-06-23 12:35:41 -0700 | [diff] [blame] | 6155 | freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan); |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6156 | |
Sean Christopherson | 1060520 | 2019-09-12 19:46:10 -0700 | [diff] [blame] | 6157 | unlock: |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6158 | spin_unlock(&kvm->mmu_lock); |
Marcelo Tosatti | f656ce0 | 2009-12-23 14:35:25 -0200 | [diff] [blame] | 6159 | srcu_read_unlock(&kvm->srcu, idx); |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6160 | |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6161 | /* |
| 6162 | * unfair on small ones |
| 6163 | * per-vm shrinkers cry out |
| 6164 | * sadness comes quickly |
| 6165 | */ |
Gleb Natapov | 1952639 | 2012-06-04 14:53:23 +0300 | [diff] [blame] | 6166 | list_move_tail(&kvm->vm_list, &vm_list); |
| 6167 | break; |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6168 | } |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6169 | |
Junaid Shahid | 0d9ce16 | 2019-01-03 17:14:28 -0800 | [diff] [blame] | 6170 | mutex_unlock(&kvm_lock); |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6171 | return freed; |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6172 | } |
| 6173 | |
| 6174 | static unsigned long |
| 6175 | mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) |
| 6176 | { |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 6177 | return percpu_counter_read_positive(&kvm_total_used_mmu_pages); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6178 | } |
| 6179 | |
| 6180 | static struct shrinker mmu_shrinker = { |
Dave Chinner | 70534a7 | 2013-08-28 10:18:14 +1000 | [diff] [blame] | 6181 | .count_objects = mmu_shrink_count, |
| 6182 | .scan_objects = mmu_shrink_scan, |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6183 | .seeks = DEFAULT_SEEKS * 10, |
| 6184 | }; |
| 6185 | |
Ingo Molnar | 2ddfd20 | 2008-05-22 10:37:48 +0200 | [diff] [blame] | 6186 | static void mmu_destroy_caches(void) |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6187 | { |
Tim Hansen | c1bd743 | 2017-10-07 23:15:23 -0400 | [diff] [blame] | 6188 | kmem_cache_destroy(pte_list_desc_cache); |
| 6189 | kmem_cache_destroy(mmu_page_header_cache); |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6190 | } |
| 6191 | |
Kai Huang | 7b6f8a0 | 2019-05-03 03:08:52 -0700 | [diff] [blame] | 6192 | static void kvm_set_mmio_spte_mask(void) |
| 6193 | { |
| 6194 | u64 mask; |
Kai Huang | 7b6f8a0 | 2019-05-03 03:08:52 -0700 | [diff] [blame] | 6195 | |
| 6196 | /* |
Sean Christopherson | 6129ed8 | 2020-05-27 01:49:09 -0700 | [diff] [blame] | 6197 | * Set a reserved PA bit in MMIO SPTEs to generate page faults with |
| 6198 | * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT |
| 6199 | * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports |
| 6200 | * 52-bit physical addresses then there are no reserved PA bits in the |
| 6201 | * PTEs and so the reserved PA approach must be disabled. |
Kai Huang | 7b6f8a0 | 2019-05-03 03:08:52 -0700 | [diff] [blame] | 6202 | */ |
Sean Christopherson | 6129ed8 | 2020-05-27 01:49:09 -0700 | [diff] [blame] | 6203 | if (shadow_phys_bits < 52) |
| 6204 | mask = BIT_ULL(51) | PT_PRESENT_MASK; |
| 6205 | else |
| 6206 | mask = 0; |
Kai Huang | 7b6f8a0 | 2019-05-03 03:08:52 -0700 | [diff] [blame] | 6207 | |
Paolo Bonzini | e7581ca | 2020-05-19 05:04:49 -0400 | [diff] [blame] | 6208 | kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK); |
Kai Huang | 7b6f8a0 | 2019-05-03 03:08:52 -0700 | [diff] [blame] | 6209 | } |
| 6210 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 6211 | static bool get_nx_auto_mode(void) |
| 6212 | { |
| 6213 | /* Return true when CPU has the bug, and mitigations are ON */ |
| 6214 | return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off(); |
| 6215 | } |
| 6216 | |
| 6217 | static void __set_nx_huge_pages(bool val) |
| 6218 | { |
| 6219 | nx_huge_pages = itlb_multihit_kvm_mitigation = val; |
| 6220 | } |
| 6221 | |
| 6222 | static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) |
| 6223 | { |
| 6224 | bool old_val = nx_huge_pages; |
| 6225 | bool new_val; |
| 6226 | |
| 6227 | /* In "auto" mode deploy workaround only if CPU has the bug. */ |
| 6228 | if (sysfs_streq(val, "off")) |
| 6229 | new_val = 0; |
| 6230 | else if (sysfs_streq(val, "force")) |
| 6231 | new_val = 1; |
| 6232 | else if (sysfs_streq(val, "auto")) |
| 6233 | new_val = get_nx_auto_mode(); |
| 6234 | else if (strtobool(val, &new_val) < 0) |
| 6235 | return -EINVAL; |
| 6236 | |
| 6237 | __set_nx_huge_pages(new_val); |
| 6238 | |
| 6239 | if (new_val != old_val) { |
| 6240 | struct kvm *kvm; |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 6241 | |
| 6242 | mutex_lock(&kvm_lock); |
| 6243 | |
| 6244 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Sean Christopherson | ed69a6c | 2019-11-13 11:30:32 -0800 | [diff] [blame] | 6245 | mutex_lock(&kvm->slots_lock); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 6246 | kvm_mmu_zap_all_fast(kvm); |
Sean Christopherson | ed69a6c | 2019-11-13 11:30:32 -0800 | [diff] [blame] | 6247 | mutex_unlock(&kvm->slots_lock); |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6248 | |
| 6249 | wake_up_process(kvm->arch.nx_lpage_recovery_thread); |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 6250 | } |
| 6251 | mutex_unlock(&kvm_lock); |
| 6252 | } |
| 6253 | |
| 6254 | return 0; |
| 6255 | } |
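/*
 * Editor's illustrative note (not upstream code): set_nx_huge_pages()
 * appears to back the "nx_huge_pages" module parameter (the registration
 * is earlier in this file, not shown here), so the mitigation can be
 * toggled at runtime, e.g. (sysfs path assumed):
 *
 *   echo force > /sys/module/kvm/parameters/nx_huge_pages
 *   echo auto  > /sys/module/kvm/parameters/nx_huge_pages
 *
 * Changing the value zaps all shadow pages in every VM (under slots_lock)
 * and wakes each VM's NX huge page recovery thread, as shown above.
 */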
| 6256 | |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6257 | int kvm_mmu_module_init(void) |
| 6258 | { |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6259 | int ret = -ENOMEM; |
| 6260 | |
Paolo Bonzini | b8e8c83 | 2019-11-04 12:22:02 +0100 | [diff] [blame] | 6261 | if (nx_huge_pages == -1) |
| 6262 | __set_nx_huge_pages(get_nx_auto_mode()); |
| 6263 | |
Vitaly Kuznetsov | 36d9594d | 2018-10-08 21:28:10 +0200 | [diff] [blame] | 6264 | /* |
| 6265 | * MMU roles use union aliasing, which is, generally speaking,
| 6266 | * undefined behavior. However, we supposedly know how compilers behave
| 6267 | * and the current status quo is unlikely to change. The guards below
| 6268 | * are there to let us know if that assumption becomes false.
| 6269 | */ |
| 6270 | BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32)); |
| 6271 | BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32)); |
| 6272 | BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64)); |
| 6273 | |
Junaid Shahid | 28a1f3a | 2018-08-14 10:15:34 -0700 | [diff] [blame] | 6274 | kvm_mmu_reset_all_pte_masks(); |
Junaid Shahid | f160c7b | 2016-12-06 16:46:16 -0800 | [diff] [blame] | 6275 | |
Kai Huang | 7b6f8a0 | 2019-05-03 03:08:52 -0700 | [diff] [blame] | 6276 | kvm_set_mmio_spte_mask(); |
| 6277 | |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 6278 | pte_list_desc_cache = kmem_cache_create("pte_list_desc", |
| 6279 | sizeof(struct pte_list_desc), |
Shakeel Butt | 46bea48 | 2017-10-05 18:07:24 -0700 | [diff] [blame] | 6280 | 0, SLAB_ACCOUNT, NULL); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 6281 | if (!pte_list_desc_cache) |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6282 | goto out; |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6283 | |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 6284 | mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", |
| 6285 | sizeof(struct kvm_mmu_page), |
Shakeel Butt | 46bea48 | 2017-10-05 18:07:24 -0700 | [diff] [blame] | 6286 | 0, SLAB_ACCOUNT, NULL); |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 6287 | if (!mmu_page_header_cache) |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6288 | goto out; |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 6289 | |
Tejun Heo | 908c7f1 | 2014-09-08 09:51:29 +0900 | [diff] [blame] | 6290 | if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6291 | goto out; |
Wei Yongjun | 45bf21a | 2010-08-23 16:13:15 +0800 | [diff] [blame] | 6292 | |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6293 | ret = register_shrinker(&mmu_shrinker); |
| 6294 | if (ret) |
| 6295 | goto out; |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6296 | |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6297 | return 0; |
| 6298 | |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6299 | out: |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 6300 | mmu_destroy_caches(); |
Arnd Bergmann | ab271bd | 2018-01-10 17:26:59 +0100 | [diff] [blame] | 6301 | return ret; |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 6302 | } |
| 6303 | |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6304 | /* |
Peng Hao | 39337ad | 2018-10-04 11:45:00 -0400 | [diff] [blame] | 6305 | * Calculate the number of MMU pages needed for the VM.
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6306 | */ |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 6307 | unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6308 | { |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 6309 | unsigned long nr_mmu_pages; |
| 6310 | unsigned long nr_pages = 0; |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 6311 | struct kvm_memslots *slots; |
Xiao Guangrong | be6ba0f | 2011-11-24 17:39:18 +0800 | [diff] [blame] | 6312 | struct kvm_memory_slot *memslot; |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 6313 | int i; |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6314 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 6315 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { |
| 6316 | slots = __kvm_memslots(kvm, i); |
Lai Jiangshan | 90d83dc | 2010-04-19 17:41:23 +0800 | [diff] [blame] | 6317 | |
Paolo Bonzini | 9da0e4d | 2015-05-18 13:33:16 +0200 | [diff] [blame] | 6318 | kvm_for_each_memslot(memslot, slots) |
| 6319 | nr_pages += memslot->npages; |
| 6320 | } |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6321 | |
| 6322 | nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; |
Ben Gardon | bc8a3d8 | 2019-04-08 11:07:30 -0700 | [diff] [blame] | 6323 | nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 6324 | |
| 6325 | return nr_mmu_pages; |
| 6326 | } |
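/*
 * Editor's illustrative note (not upstream code): a worked example of the
 * sizing above, assuming KVM_PERMILLE_MMU_PAGES == 20 and
 * KVM_MIN_ALLOC_MMU_PAGES == 64 (their usual values). For 4 GiB of
 * memslots, i.e. 1048576 4K pages:
 *
 *   nr_mmu_pages = 1048576 * 20 / 1000 = 20971
 *   nr_mmu_pages = max(20971, 64)      = 20971
 *
 * i.e. shadow pages are budgeted at roughly 2% of guest pages, with tiny
 * guests clamped to the 64-page minimum.
 */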
| 6327 | |
Xiao Guangrong | c42fffe | 2010-09-27 18:07:07 +0800 | [diff] [blame] | 6328 | void kvm_mmu_destroy(struct kvm_vcpu *vcpu) |
| 6329 | { |
Paolo Bonzini | 95f93af | 2013-10-02 16:56:12 +0200 | [diff] [blame] | 6330 | kvm_mmu_unload(vcpu); |
Jiří Paleček | 1cfff4d | 2019-06-22 19:42:04 +0200 | [diff] [blame] | 6331 | free_mmu_pages(&vcpu->arch.root_mmu); |
| 6332 | free_mmu_pages(&vcpu->arch.guest_mmu); |
Xiao Guangrong | c42fffe | 2010-09-27 18:07:07 +0800 | [diff] [blame] | 6333 | mmu_free_memory_caches(vcpu); |
Xiao Guangrong | b034cf0 | 2010-12-23 16:08:35 +0800 | [diff] [blame] | 6334 | } |
| 6335 | |
Xiao Guangrong | b034cf0 | 2010-12-23 16:08:35 +0800 | [diff] [blame] | 6336 | void kvm_mmu_module_exit(void) |
| 6337 | { |
| 6338 | mmu_destroy_caches(); |
| 6339 | percpu_counter_destroy(&kvm_total_used_mmu_pages); |
| 6340 | unregister_shrinker(&mmu_shrinker); |
Xiao Guangrong | c42fffe | 2010-09-27 18:07:07 +0800 | [diff] [blame] | 6341 | mmu_audit_disable(); |
| 6342 | } |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6343 | |
| 6344 | static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp) |
| 6345 | { |
| 6346 | unsigned int old_val; |
| 6347 | int err; |
| 6348 | |
| 6349 | old_val = nx_huge_pages_recovery_ratio; |
| 6350 | err = param_set_uint(val, kp); |
| 6351 | if (err) |
| 6352 | return err; |
| 6353 | |
| 6354 | if (READ_ONCE(nx_huge_pages) && |
| 6355 | !old_val && nx_huge_pages_recovery_ratio) { |
| 6356 | struct kvm *kvm; |
| 6357 | |
| 6358 | mutex_lock(&kvm_lock); |
| 6359 | |
| 6360 | list_for_each_entry(kvm, &vm_list, vm_list) |
| 6361 | wake_up_process(kvm->arch.nx_lpage_recovery_thread); |
| 6362 | |
| 6363 | mutex_unlock(&kvm_lock); |
| 6364 | } |
| 6365 | |
| 6366 | return err; |
| 6367 | } |
| 6368 | |
| 6369 | static void kvm_recover_nx_lpages(struct kvm *kvm) |
| 6370 | { |
| 6371 | int rcu_idx; |
| 6372 | struct kvm_mmu_page *sp; |
| 6373 | unsigned int ratio; |
| 6374 | LIST_HEAD(invalid_list); |
| 6375 | ulong to_zap; |
| 6376 | |
| 6377 | rcu_idx = srcu_read_lock(&kvm->srcu); |
| 6378 | spin_lock(&kvm->mmu_lock); |
| 6379 | |
| 6380 | ratio = READ_ONCE(nx_huge_pages_recovery_ratio); |
| 6381 | to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0; |
Sean Christopherson | 7d919c7 | 2020-09-23 11:37:29 -0700 | [diff] [blame] | 6382 | for ( ; to_zap; --to_zap) { |
| 6383 | if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) |
| 6384 | break; |
| 6385 | |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6386 | /* |
| 6387 | * We use a separate list instead of just using active_mmu_pages |
| 6388 | * because the number of lpage_disallowed pages is expected to |
| 6389 | * be relatively small compared to the total. |
| 6390 | */ |
| 6391 | sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages, |
| 6392 | struct kvm_mmu_page, |
| 6393 | lpage_disallowed_link); |
| 6394 | WARN_ON_ONCE(!sp->lpage_disallowed); |
| 6395 | kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); |
| 6396 | WARN_ON_ONCE(sp->lpage_disallowed); |
| 6397 | |
Sean Christopherson | 7d919c7 | 2020-09-23 11:37:29 -0700 | [diff] [blame] | 6398 | if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6399 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Sean Christopherson | 7d919c7 | 2020-09-23 11:37:29 -0700 | [diff] [blame] | 6400 | cond_resched_lock(&kvm->mmu_lock); |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6401 | } |
| 6402 | } |
Sean Christopherson | e895056 | 2020-09-23 11:37:28 -0700 | [diff] [blame] | 6403 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Junaid Shahid | 1aa9b95 | 2019-11-04 20:26:00 +0100 | [diff] [blame] | 6404 | |
| 6405 | spin_unlock(&kvm->mmu_lock); |
| 6406 | srcu_read_unlock(&kvm->srcu, rcu_idx); |
| 6407 | } |
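/*
 * Editor's illustrative note (not upstream code): a worked example of the
 * pacing above, assuming the default nx_huge_pages_recovery_ratio of 60.
 * With kvm->stat.nx_lpage_splits == 10000 huge pages currently split
 * because of the NX workaround:
 *
 *   to_zap = DIV_ROUND_UP(10000, 60) = 167
 *
 * so each wakeup of the recovery thread (roughly once per minute, see
 * get_nx_lpage_recovery_timeout() below) zaps about 1/60 of the split
 * pages, allowing them to be rebuilt as huge pages if the guest no longer
 * executes from them.
 */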
| 6408 | |
| 6409 | static long get_nx_lpage_recovery_timeout(u64 start_time) |
| 6410 | { |
| 6411 | return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio) |
| 6412 | ? start_time + 60 * HZ - get_jiffies_64() |
| 6413 | : MAX_SCHEDULE_TIMEOUT; |
| 6414 | } |
| 6415 | |
| 6416 | static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data) |
| 6417 | { |
| 6418 | u64 start_time; |
| 6419 | long remaining_time; |
| 6420 | |
| 6421 | while (true) { |
| 6422 | start_time = get_jiffies_64(); |
| 6423 | remaining_time = get_nx_lpage_recovery_timeout(start_time); |
| 6424 | |
| 6425 | set_current_state(TASK_INTERRUPTIBLE); |
| 6426 | while (!kthread_should_stop() && remaining_time > 0) { |
| 6427 | schedule_timeout(remaining_time); |
| 6428 | remaining_time = get_nx_lpage_recovery_timeout(start_time); |
| 6429 | set_current_state(TASK_INTERRUPTIBLE); |
| 6430 | } |
| 6431 | |
| 6432 | set_current_state(TASK_RUNNING); |
| 6433 | |
| 6434 | if (kthread_should_stop()) |
| 6435 | return 0; |
| 6436 | |
| 6437 | kvm_recover_nx_lpages(kvm); |
| 6438 | } |
| 6439 | } |
| 6440 | |
| 6441 | int kvm_mmu_post_init_vm(struct kvm *kvm) |
| 6442 | { |
| 6443 | int err; |
| 6444 | |
| 6445 | err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0, |
| 6446 | "kvm-nx-lpage-recovery", |
| 6447 | &kvm->arch.nx_lpage_recovery_thread); |
| 6448 | if (!err) |
| 6449 | kthread_unpark(kvm->arch.nx_lpage_recovery_thread); |
| 6450 | |
| 6451 | return err; |
| 6452 | } |
| 6453 | |
| 6454 | void kvm_mmu_pre_destroy_vm(struct kvm *kvm) |
| 6455 | { |
| 6456 | if (kvm->arch.nx_lpage_recovery_thread) |
| 6457 | kthread_stop(kvm->arch.nx_lpage_recovery_thread); |
| 6458 | } |