/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

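/*
 * Flush the stage-2 TLB entries for the given IPA on this VM; the actual
 * invalidation runs in Hyp mode via kvm_call_hyp().
 */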
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
        kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

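/*
 * Helpers for the small cache of pre-allocated page-table pages.  Callers
 * top up the cache before taking the mmu_lock spinlock, so that
 * stage2_set_pte() can later allocate intermediate tables from it without
 * sleeping.
 */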
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(PGALLOC_GFP);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

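/*
 * Walk all PTRS_PER_PMD entries of a Hyp pmd page and free the level-3
 * (pte) tables they point to.
 */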
static void free_ptes(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
                if (!pmd_none(*pmd) && pmd_table(*pmd)) {
                        pte = pte_offset_kernel(pmd, addr);
                        pte_free_kernel(NULL, pte);
                }
                pmd++;
        }
}

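/*
 * Tear down the Hyp mapping rooted at the pud entry covering @addr: free
 * the level-3 tables, free the pmd page itself and clear the pud entry.
 */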
static void free_hyp_pgd_entry(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long hyp_addr = KERN_TO_HYP(addr);

        pgd = hyp_pgd + pgd_index(hyp_addr);
        pud = pud_offset(pgd, hyp_addr);

        if (pud_none(*pud))
                return;
        BUG_ON(pud_bad(*pud));

        pmd = pmd_offset(pud, hyp_addr);
        free_ptes(pmd, addr);
        pmd_free(NULL, pmd);
        pud_clear(pud);
}

/**
 * free_hyp_pmds - free the Hyp-mode level-2 tables and child level-3 tables
 *
 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
 * either mappings in the kernel memory area (above PAGE_OFFSET), or
 * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
 */
void free_hyp_pmds(void)
{
        unsigned long addr;

        mutex_lock(&kvm_hyp_pgd_mutex);
        for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
                free_hyp_pgd_entry(addr);
        for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
                free_hyp_pgd_entry(addr);
        mutex_unlock(&kvm_hyp_pgd_mutex);
}

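/*
 * Install level-3 (pte) Hyp mappings for [start, end), starting at the
 * given pfn and using the supplied protection bits.
 */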
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
                                    unsigned long end, unsigned long pfn,
                                    pgprot_t prot)
{
        pte_t *pte;
        unsigned long addr;

        addr = start;
        do {
                pte = pte_offset_kernel(pmd, addr);
                kvm_set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (addr += PAGE_SIZE, addr != end);
}

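/*
 * Walk the pmd entries covering [start, end), allocating level-3 tables
 * as needed, and hand each pmd-sized chunk to create_hyp_pte_mappings().
 */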
static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
                                   unsigned long end, unsigned long pfn,
                                   pgprot_t prot)
{
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, next;

        addr = start;
        do {
                pmd = pmd_offset(pud, addr);

                BUG_ON(pmd_sect(*pmd));

                if (pmd_none(*pmd)) {
                        pte = pte_alloc_one_kernel(NULL, addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
                        }
                        pmd_populate_kernel(NULL, pmd, pte);
                }

                next = pmd_addr_end(addr, end);

                create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);

        return 0;
}

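/*
 * Common helper for the Hyp mapping functions below: walks the given pgd
 * under kvm_hyp_pgd_mutex, allocating intermediate tables as needed, and
 * maps [start, end) to the physical range starting at @pfn with @prot.
 */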
static int __create_hyp_mappings(pgd_t *pgdp,
                                 unsigned long start, unsigned long end,
                                 unsigned long pfn, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int err = 0;

        mutex_lock(&kvm_hyp_pgd_mutex);
        addr = start & PAGE_MASK;
        end = PAGE_ALIGN(end);
        do {
                pgd = pgdp + pgd_index(addr);
                pud = pud_offset(pgd, addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        pud_populate(NULL, pud, pmd);
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
                if (err)
                        goto out;
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);
out:
        mutex_unlock(&kvm_hyp_pgd_mutex);
        return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The Hyp-mode mapping uses the same virtual addresses as the kernel
 * (modulo HYP_PAGE_OFFSET) and points to the same underlying physical
 * pages.
 */
int create_hyp_mappings(void *from, void *to)
{
        unsigned long phys_addr = virt_to_phys(from);
        unsigned long start = KERN_TO_HYP((unsigned long)from);
        unsigned long end = KERN_TO_HYP((unsigned long)to);

        /* Check for a valid kernel memory mapping */
        if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
                return -EINVAL;

        return __create_hyp_mappings(hyp_pgd, start, end,
                                     __phys_to_pfn(phys_addr), PAGE_HYP);
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
        unsigned long start = KERN_TO_HYP((unsigned long)from);
        unsigned long end = KERN_TO_HYP((unsigned long)to);

        /* Check for a valid kernel IO mapping */
        if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
                return -EINVAL;

        return __create_hyp_mappings(hyp_pgd, start, end,
                                     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates only the 1st level table, whose size is defined by S2_PGD_ORDER
 * (it can cover either the full 40-bit input address space or be limited to
 * 32-bit input addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
        pgd_t *pgd;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
        if (!pgd)
                return -ENOMEM;

        /* stage-2 pgd must be aligned to its size */
        VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

        memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
        kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;

        return 0;
}

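/*
 * Stage-2 teardown helpers.  The refcount of the page backing each table
 * tracks the number of valid entries it contains (see stage2_set_pte());
 * a table whose page count has dropped back to one is empty, so it can be
 * freed and the entry pointing to it cleared.
 */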
static void clear_pud_entry(pud_t *pud)
{
        pmd_t *pmd_table = pmd_offset(pud, 0);
        pud_clear(pud);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
{
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        pmd_clear(pmd);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
        struct page *pmd_page = virt_to_page(pmd);
        return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
{
        if (pte_present(*pte)) {
                kvm_set_pte(pte, __pte(0));
                put_page(virt_to_page(pte));
        }
}

static bool pte_empty(pte_t *pte)
{
        struct page *pte_page = virt_to_page(pte);
        return page_count(pte_page) == 1;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        phys_addr_t addr = start, end = start + size;
        u64 range;

        while (addr < end) {
                pgd = kvm->arch.pgd + pgd_index(addr);
                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
                        addr += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        addr += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                clear_pte_entry(pte);
                range = PAGE_SIZE;

                /* If we emptied the pte, walk back up the ladder */
                if (pte_empty(pte)) {
                        clear_pmd_entry(pmd);
                        range = PMD_SIZE;
                        if (pmd_empty(pmd)) {
                                clear_pud_entry(pud);
                                range = PUD_SIZE;
                        }
                }

                addr += range;
        }
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1
 * table and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
        if (kvm->arch.pgd == NULL)
                return;

        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
        free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
        kvm->arch.pgd = NULL;
}

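/*
 * Map a single page at @addr in the VM's stage-2 page tables, allocating
 * missing intermediate tables from @cache.  The refcount of each table
 * page is raised for every new entry so the unmap path can tell when a
 * table becomes empty.  With @iomap set, an already-present pte is treated
 * as an error rather than silently overwritten.
 */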
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, old_pte;

        /* Create 2nd stage page table mapping - Level 1 */
        pgd = kvm->arch.pgd + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
                get_page(virt_to_page(pud));
        }

        pmd = pmd_offset(pud, addr);

        /* Create 2nd stage page table mapping - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pte = mmu_memory_cache_alloc(cache);
                kvm_clean_pte(pte);
                pmd_populate_kernel(NULL, pmd, pte);
                get_page(virt_to_page(pmd));
        }

        pte = pte_offset_kernel(pmd, addr);

        if (iomap && pte_present(*pte))
                return -EFAULT;

        /* Create 2nd stage page table mapping - Level 3 */
        old_pte = *pte;
        kvm_set_pte(pte, *new_pte);
        if (pte_present(old_pte))
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        else
                get_page(virt_to_page(pte));

        return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size)
{
        phys_addr_t addr, end;
        int ret = 0;
        unsigned long pfn;
        struct kvm_mmu_memory_cache cache = { 0, };

        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(pa);

        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
                kvm_set_s2pte_writable(&pte);

                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
                        goto out;
                spin_lock(&kvm->mmu_lock);
                ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;

                pfn++;
        }

out:
        mmu_free_memory_cache(&cache);
        return ret;
}

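/*
 * Handle a stage-2 fault on an address backed by a memslot: pin the page
 * with gfn_to_pfn_prot(), then install a stage-2 pte for it under
 * mmu_lock, re-checking mmu_notifier_seq to catch a concurrent unmap of
 * the page between the pin and the map.
 */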
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          gfn_t gfn, struct kvm_memory_slot *memslot,
                          unsigned long fault_status)
{
        pte_t new_pte;
        pfn_t pfn;
        int ret;
        bool write_fault, writable;
        unsigned long mmu_seq;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

        write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
        if (fault_status == FSC_PERM && !write_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }

        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
        if (ret)
                return ret;

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq happens before we call
         * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
         * the page we just got a reference to getting unmapped before we have
         * a chance to grab the mmu_lock, which ensures that if the page gets
         * unmapped afterwards, the call to kvm_unmap_hva will take it away
         * from us again properly. This smp_rmb() interacts with the smp_wmb()
         * in kvm_mmu_notifier_invalidate_<page|range_end>.
         */
        smp_rmb();

        pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
        if (is_error_pfn(pfn))
                return -EFAULT;

        new_pte = pfn_pte(pfn, PAGE_S2);
        coherent_icache_guest_page(vcpu->kvm, gfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
        if (writable) {
                kvm_set_s2pte_writable(&new_pte);
                kvm_set_pfn_dirty(pfn);
        }
        stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either
 * the guest simply needs more memory and we must allocate an appropriate
 * page, or that the guest tried to access I/O memory, which is emulated by
 * user space. The distinction is based on the IPA causing the fault and
 * whether this memory region has been registered as standard RAM by user
 * space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long fault_status;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
        bool is_iabt;
        gfn_t gfn;
        int ret, idx;

        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

        trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);

        /* Check that the stage-2 fault is a translation or permission fault */
        fault_status = kvm_vcpu_trap_get_fault(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
                kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu), fault_status);
                return -EFAULT;
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        gfn = fault_ipa >> PAGE_SHIFT;
        if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
                        kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                        ret = 1;
                        goto out_unlock;
                }

                if (fault_status != FSC_FAULT) {
                        kvm_err("Unsupported fault status on io memory: %#lx\n",
                                fault_status);
                        ret = -EFAULT;
                        goto out_unlock;
                }

                /*
                 * The IPA is reported as [MAX:12], so we need to
                 * complement it with the bottom 12 bits from the
                 * faulting VA. This is always 12 bits, irrespective
                 * of the page size.
                 */
                fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
                ret = io_mem_abort(vcpu, run, fault_ipa);
                goto out_unlock;
        }

        memslot = gfn_to_memslot(vcpu->kvm, gfn);

        ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
        if (ret == 0)
                ret = 1;
out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
}

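/*
 * Iterate over every memslot that intersects the host VA range
 * [start, end) and invoke @handler on each guest physical page the range
 * covers.  Used by the MMU notifier callbacks below.
 */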
static void handle_hva_to_gpa(struct kvm *kvm,
                              unsigned long start,
                              unsigned long end,
                              void (*handler)(struct kvm *kvm,
                                              gpa_t gpa, void *data),
                              void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                for (; gfn < gfn_end; ++gfn) {
                        gpa_t gpa = gfn << PAGE_SHIFT;
                        handler(kvm, gpa, data);
                }
        }
}

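/*
 * MMU notifier callbacks: when the host unmaps or changes a page that
 * backs guest memory, the corresponding stage-2 mappings are removed (or,
 * for kvm_set_spte_hva, replaced) so the guest cannot keep using a stale
 * translation.
 */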
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
        kvm_tlb_flush_vmid_ipa(kvm, gpa);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        unsigned long end = hva + PAGE_SIZE;

        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva(hva);
        handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
{
        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva_range(start, end);
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pte_t *pte = (pte_t *)data;

        stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        pte_t stage2_pte;

        if (!kvm->arch.pgd)
                return;

        trace_kvm_set_spte_hva(hva);
        stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

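/* Return the physical address of the Hyp translation table base (HTTBR). */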
phys_addr_t kvm_mmu_get_httbr(void)
{
        VM_BUG_ON(!virt_addr_valid(hyp_pgd));
        return virt_to_phys(hyp_pgd);
}

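/*
 * Allocate the Hyp-mode pgd and create an identity mapping of the
 * __hyp_idmap_text section in the boot page tables, which is needed while
 * Hyp mode is being initialised.
 */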
int kvm_mmu_init(void)
{
        unsigned long hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
        unsigned long hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
        int err;

        hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
        if (!hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                err = -ENOMEM;
                goto out;
        }

        /* Create the idmap in the boot page tables */
        err = __create_hyp_mappings(boot_hyp_pgd,
                                    hyp_idmap_start, hyp_idmap_end,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);

        if (err) {
                kvm_err("Failed to idmap %lx-%lx\n",
                        hyp_idmap_start, hyp_idmap_end);
                goto out;
        }

        return 0;
out:
        kfree(hyp_pgd);
        return err;
}

/**
 * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
 *
 * Free the underlying pmds for all pgds in range and clear the pgds (but
 * don't free them) afterwards.
 */
void kvm_clear_hyp_idmap(void)
{
        unsigned long addr, end;
        unsigned long next;
        pgd_t *pgd = hyp_pgd;
        pud_t *pud;
        pmd_t *pmd;

        addr = virt_to_phys(__hyp_idmap_text_start);
        end = virt_to_phys(__hyp_idmap_text_end);

        pgd += pgd_index(addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pud = pud_offset(pgd, addr);
                pmd = pmd_offset(pud, addr);

                pud_clear(pud);
                kvm_clean_pmd_entry(pmd);
                pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
        } while (pgd++, addr = next, addr < end);
}