/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
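/*
 * For reference, the includer (mmu.c) is expected to instantiate this
 * template roughly as follows (a sketch, not a verbatim copy):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */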

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

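/*
 * FNAME() gives each instantiation its own copy of the helpers below
 * (e.g. gpte_to_gfn expands to paging64_gpte_to_gfn or
 * paging32_gpte_to_gfn), so the two compilations of this file do not
 * clash at link time.
 */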
#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};

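/* Extract the guest frame number from a last-level guest pte. */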
static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

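/* Extract the base frame number from a guest pde that maps a large page. */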
static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

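/*
 * Atomically update a guest pte through a temporary kernel mapping of the
 * guest page table page.  Returns true if the pte was not what we
 * expected, in which case the caller restarts the guest walk.
 */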
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
				gfn_t table_gfn, unsigned index,
				pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

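/*
 * Derive the ACC_* permission bits from a guest pte, honoring the NX bit
 * for 64-bit ptes.
 */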
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}

/*
 * Fetch a guest pte for a guest virtual address.  Walk the guest page
 * tables, setting the accessed (and, on write faults, dirty) bits along
 * the way, and fill in *walker.  Returns 1 if the translation exists;
 * otherwise returns 0 with walker->error_code set for injection.
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;
	int rsvd_fault = 0;

	pgprintk("%s: addr %lx\n", __func__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __func__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
		if (rsvd_fault)
			goto access_error;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->error_code |= PFERR_RSVD_MASK;
	return 0;
}

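/*
 * Reflect an emulated guest pte write into the corresponding shadow pte,
 * reusing the pfn cached by the pte-write path in vcpu->arch.update_pte.
 */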
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	int largepage = vcpu->arch.update_pte.largepage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, largepage,
		     gpte & PT_GLOBAL_MASK, gpte_to_gfn(gpte),
		     pfn, true);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy,
 * instantiating any missing shadow page table levels along the way.
 * Returns a pointer to the installed spte, or NULL if the guest pte
 * changed under us or is no longer present.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int largepage,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *shadow_page;
	u64 spte, *sptep;
	int direct;
	gfn_t table_gfn;
	int r;
	int level;
	pt_element_t curr_pte;
	struct kvm_shadow_walk_iterator iterator;

	if (!is_present_pte(gw->ptes[gw->level - 1]))
		return NULL;

	for_each_shadow_entry(vcpu, addr, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;
		if (level == PT_PAGE_TABLE_LEVEL
		    || (largepage && level == PT_DIRECTORY_LEVEL)) {
			mmu_set_spte(vcpu, sptep, access,
				     gw->pte_access & access,
				     user_fault, write_fault,
				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
				     ptwrite, largepage,
				     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
				     gw->gfn, pfn, false);
			break;
		}

		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
			continue;

		if (is_large_pte(*sptep)) {
			rmap_remove(vcpu->kvm, sptep);
			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		}

		if (level == PT_DIRECTORY_LEVEL
		    && gw->level == PT_DIRECTORY_LEVEL) {
			direct = 1;
			if (!is_dirty_pte(gw->ptes[level - 1]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
		} else {
			direct = 0;
			table_gfn = gw->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       direct, access, sptep);
		if (!direct) {
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gw->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != gw->ptes[level - 2]) {
				kvm_mmu_put_page(shadow_page, sptep);
				kvm_release_pfn_clean(pfn);
				sptep = NULL;
				break;
			}
		}

		spte = __pa(shadow_page->spt)
			| PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*sptep = spte;
	}

	return sptep;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *	    a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int largepage = 0;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level == PT_DIRECTORY_LEVEL) {
		gfn_t large_gfn;
		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
		if (is_largepage_backed(vcpu, large_gfn)) {
			walker.gfn = large_gfn;
			largepage = 1;
		}
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	/* mmio */
	if (is_error_pfn(pfn)) {
		pgprintk("gfn %lx is mmio\n", walker.gfn);
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  largepage, &write_pt, pfn);

	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

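/*
 * Emulate INVLPG: zap the shadow pte that maps gva, then resynchronize
 * it from the guest pte if that pte is still present and accessed.
 */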
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	pt_element_t gpte;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		/* FIXME: properly handle invlpg on large guest pages */
		if (level == PT_PAGE_TABLE_LEVEL ||
		    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
			struct kvm_mmu_page *sp = page_header(__pa(sptep));

			pte_gpa = (sp->gfn << PAGE_SHIFT);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				rmap_remove(vcpu->kvm, sptep);
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				need_flush = 1;
			}
			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep))
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;
	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
				  sizeof(pt_element_t)))
		return;
	if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
		if (mmu_topup_memory_caches(vcpu))
			return;
		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
				  sizeof(pt_element_t), 0);
	}
}

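/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest page tables; returns UNMAPPED_GVA if no mapping
 * exists.
 */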
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

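/*
 * Populate a fresh shadow page's sptes: entries backed by present guest
 * ptes are left trapping (shadow_trap_nonpresent_pte), while entries for
 * not-present guest ptes can reflect the fault straight to the guest
 * (shadow_notrap_nonpresent_pte).
 */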
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_pte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;

	offset = nr_present = 0;

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn = sp->gfns[i];

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
		    !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			rmap_remove(vcpu->kvm, &sp->spt[i]);
			if (is_present_pte(gpte))
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			set_shadow_pte(&sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_pte(gpte), 0, gpte & PT_GLOBAL_MASK, gfn,
			 spte_to_pfn(sp->spt[i]), true, false);
	}

	return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG