/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

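/*
 * Submit a buffer of MMU ops to the host via the KVM_HC_MMU_OP
 * hypercall.  The hypercall returns the number of bytes it consumed,
 * so resubmit the remainder until the whole buffer is processed.
 */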
static void kvm_mmu_op(void *buffer, unsigned len)
{
	int r;
	unsigned long a1, a2;

	do {
		a1 = __pa(buffer);
		a2 = 0;   /* on i386 __pa() always returns <4G */
		r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
		buffer += r;
		len -= r;
	} while (len);
}

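/*
 * Ask the host to write one pte.  The host wants the pte's physical
 * address: with CONFIG_HIGHPTE the pte page may live in highmem, where
 * __pa() is not valid, so derive the address from the kmapped page
 * instead.
 */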
static void kvm_mmu_write(void *dest, u64 val)
{
	__u64 pte_phys;
	struct kvm_mmu_op_write_pte wpte;

#ifdef CONFIG_HIGHPTE
	struct page *page;
	unsigned long dst = (unsigned long) dest;

	page = kmap_atomic_to_page(dest);
	pte_phys = page_to_pfn(page);
	pte_phys <<= PAGE_SHIFT;
	pte_phys += (dst & ~(PAGE_MASK));
#else
	pte_phys = (unsigned long)__pa(dest);
#endif
	wpte.header.op = KVM_MMU_OP_WRITE_PTE;
	wpte.pte_val = val;
	wpte.pte_phys = pte_phys;

	kvm_mmu_op(&wpte, sizeof wpte);
}

/*
 * We only need to hook operations that are MMU writes.  We hook these so
 * that we can use lazy MMU mode to batch these operations.  We could
 * probably improve the performance of the host code if we used some of the
 * information here to simplify processing of batched writes.
 */
static void kvm_set_pte(pte_t *ptep, pte_t pte)
{
	kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pte)
{
	kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	kvm_mmu_write(pmdp, pmd_val(pmd));
}

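/*
 * With PAE, ptes are 64 bits wide, so the kernel expects dedicated
 * atomic set/clear hooks; routing them through the hypercall keeps
 * each update atomic from the guest's point of view.
 */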
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_set_pte_present(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_pte_clear(struct mm_struct *mm,
			  unsigned long addr, pte_t *ptep)
{
	kvm_mmu_write(ptep, 0);
}

static void kvm_pmd_clear(pmd_t *pmdp)
{
	kvm_mmu_write(pmdp, 0);
}
#endif

static void kvm_set_pud(pud_t *pudp, pud_t pud)
{
	kvm_mmu_write(pudp, pud_val(pud));
}

#if PAGETABLE_LEVELS == 4
static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	kvm_mmu_write(pgdp, pgd_val(pgd));
}
#endif
#endif /* PAGETABLE_LEVELS >= 3 */

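/*
 * Replace the native TLB flush with a KVM_MMU_OP_FLUSH_TLB request, so
 * the host flushes after the batched writes above have been applied.
 */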
static void kvm_flush_tlb(void)
{
	struct kvm_mmu_op_flush_tlb ftlb = {
		.header.op = KVM_MMU_OP_FLUSH_TLB,
	};

	kvm_mmu_op(&ftlb, sizeof ftlb);
}

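/*
 * Notify the host that a page is no longer used as a page table; the
 * same helper serves pte, pmd and pud pages, identified only by pfn.
 */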
static void kvm_release_pt(u32 pfn)
{
	struct kvm_mmu_op_release_pt rpt = {
		.header.op = KVM_MMU_OP_RELEASE_PT,
		.pt_phys = (u64)pfn << PAGE_SHIFT,
	};

	kvm_mmu_op(&rpt, sizeof rpt);
}

static void paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

	if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
		pv_mmu_ops.set_pte = kvm_set_pte;
		pv_mmu_ops.set_pte_at = kvm_set_pte_at;
		pv_mmu_ops.set_pmd = kvm_set_pmd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
		pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
		pv_mmu_ops.set_pte_present = kvm_set_pte_present;
		pv_mmu_ops.pte_clear = kvm_pte_clear;
		pv_mmu_ops.pmd_clear = kvm_pmd_clear;
#endif
		pv_mmu_ops.set_pud = kvm_set_pud;
#if PAGETABLE_LEVELS == 4
		pv_mmu_ops.set_pgd = kvm_set_pgd;
#endif
#endif
		pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
		pv_mmu_ops.release_pte = kvm_release_pt;
		pv_mmu_ops.release_pmd = kvm_release_pt;
		pv_mmu_ops.release_pud = kvm_release_pt;
	}
}

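/*
 * kvm_para_available() checks for the KVM signature cpuid leaf, so the
 * paravirt hooks are only installed when actually running on KVM.
 */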
void __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
}