/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

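/* Invalidate the stage-2 TLB entries for @ipa in this VM's VMID via a Hyp call. */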
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

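/*
 * Page-table object cache helpers: pre-allocate up to @max pages so that
 * stage-2 table allocations done under the mmu_lock spinlock cannot sleep
 * or fail.
 */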
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

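/* Free every level-3 (pte) table hanging off the entries of a Hyp-mode pmd table. */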
static void free_ptes(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
		if (!pmd_none(*pmd) && pmd_table(*pmd)) {
			pte = pte_offset_kernel(pmd, addr);
			pte_free_kernel(NULL, pte);
		}
		pmd++;
	}
}

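/* Tear down the pmd table (and its pte tables) backing one Hyp-mode pgd entry. */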
static void free_hyp_pgd_entry(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long hyp_addr = KERN_TO_HYP(addr);

	pgd = hyp_pgd + pgd_index(hyp_addr);
	pud = pud_offset(pgd, hyp_addr);

	if (pud_none(*pud))
		return;
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, hyp_addr);
	free_ptes(pmd, addr);
	pmd_free(NULL, pmd);
	pud_clear(pud);
}

/**
 * free_hyp_pmds - free the Hyp-mode level-2 tables and child level-3 tables
 *
 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
 * either mappings in the kernel memory area (above PAGE_OFFSET), or
 * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
 */
void free_hyp_pmds(void)
{
	unsigned long addr;

	mutex_lock(&kvm_hyp_pgd_mutex);
	for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
		free_hyp_pgd_entry(addr);
	for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
		free_hyp_pgd_entry(addr);
	mutex_unlock(&kvm_hyp_pgd_mutex);
}

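/* Fill in level-3 (pte) Hyp entries for [start, end), mapping pages from @pfn with @prot. */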
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

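/*
 * Allocate pte tables as needed for [start, end) and install the level-3
 * Hyp entries via create_hyp_pte_mappings().
 */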
static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

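/*
 * Common worker for create_hyp_mappings() and create_hyp_io_mappings(): walk
 * @pgdp over [start, end), allocating intermediate tables as needed, and map
 * the range to consecutive pfns starting at @pfn with protection @prot.
 */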
static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The Hyp-mode mapping uses the same virtual address as the kernel virtual
 * address (modulo HYP_PAGE_OFFSET) and maps to the same underlying physical
 * pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	unsigned long phys_addr = virt_to_phys(from);
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel memory mapping */
	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP);
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates only the 1st level table, whose size is defined by S2_PGD_ORDER
 * (it can support either full 40-bit input addresses or be limited to 32-bit
 * input addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	/* stage-2 pgd must be aligned to its size */
	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;

	return 0;
}

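/*
 * Stage-2 teardown helpers. Each table page's refcount tracks the number of
 * entries installed below it, so a page_count() of 1 means the table is empty
 * and its parent entry can be cleared and the table freed.
 */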
static void clear_pud_entry(pud_t *pud)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	pud_clear(pud);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	pmd_clear(pmd);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
	struct page *pmd_page = virt_to_page(pmd);
	return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
	}
}

static bool pte_empty(pte_t *pte)
{
	struct page *pte_page = virt_to_page(pte);
	return page_count(pte_page) == 1;
}

/**
 * unmap_stage2_range - Clear stage-2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_addr_t addr = start, end = start + size;
	u64 range;

	while (addr < end) {
		pgd = kvm->arch.pgd + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(pte);
		range = PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (pte_empty(pte)) {
			clear_pmd_entry(pmd);
			range = PMD_SIZE;
			if (pmd_empty(pmd)) {
				clear_pud_entry(pud);
				range = PUD_SIZE;
			}
		}

		addr += range;
	}
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}

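/*
 * Install *new_pte in the stage-2 page tables at @addr, allocating missing
 * intermediate tables from @cache. When @iomap is set, refuse to overwrite
 * an existing mapping.
 */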
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
		kvm_set_s2pte_writable(&pte);

		ret = mmu_topup_memory_cache(&cache, 2, 2);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}

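/*
 * Resolve a stage-2 fault on memslot-backed memory: pin the backing page with
 * gfn_to_pfn_prot() and install the corresponding stage-2 pte, re-checking
 * mmu_notifier_seq under mmu_lock to avoid racing with host unmaps.
 */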
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  gfn_t gfn, struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	pte_t new_pte;
	pfn_t pfn;
	int ret;
	bool write_fault, writable;
	unsigned long mmu_seq;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to getting unmapped before we have
	 * a chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	new_pte = pfn_pte(pfn, PAGE_S2);
	coherent_icache_guest_page(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	if (writable) {
		kvm_set_s2pte_writable(&new_pte);
		kvm_set_pfn_dirty(pfn);
	}
	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean either that
 * the guest simply needs more memory and we must allocate an appropriate
 * page, or that the guest tried to access I/O memory, which is emulated by
 * user space. The distinction is based on the IPA causing the fault and on
 * whether this memory region has been registered as standard RAM by user
 * space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	bool is_iabt;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	fault_status = kvm_vcpu_trap_get_fault(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu), fault_status);
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		if (fault_status != FSC_FAULT) {
			kvm_err("Unsupported fault status on io memory: %#lx\n",
				fault_status);
			ret = -EFAULT;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	memslot = gfn_to_memslot(vcpu->kvm, gfn);

	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

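/*
 * Call @handler on every guest physical page whose backing host VA intersects
 * [start, end), walking all memslots.
 */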
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}

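/*
 * MMU notifier callbacks (kvm_unmap_hva, kvm_unmap_hva_range, kvm_set_spte_hva)
 * keep the stage-2 tables in sync with changes to the host userspace mappings.
 */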
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
	kvm_tlb_flush_vmid_ipa(kvm, gpa);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

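/* Return the physical address of the Hyp-mode pgd, suitable for HTTBR. */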
phys_addr_t kvm_mmu_get_httbr(void)
{
	VM_BUG_ON(!virt_addr_valid(hyp_pgd));
	return virt_to_phys(hyp_pgd);
}

int kvm_mmu_init(void)
{
	if (!hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
 *
 * Free the underlying pmds for all pgds in range and clear the pgds (but
 * don't free them) afterwards.
 */
void kvm_clear_hyp_idmap(void)
{
	unsigned long addr, end;
	unsigned long next;
	pgd_t *pgd = hyp_pgd;
	pud_t *pud;
	pmd_t *pmd;

	addr = virt_to_phys(__hyp_idmap_text_start);
	end = virt_to_phys(__hyp_idmap_text_end);

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pud = pud_offset(pgd, addr);
		pmd = pmd_offset(pud, addr);

		pud_clear(pud);
		kvm_clean_pmd_entry(pmd);
		pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
	} while (pgd++, addr = next, addr < end);
}