/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>

#define MMU_QUEUE_SIZE 1024

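/*
 * Async page faults are on by default; "no-kvmapf" on the kernel
 * command line opts this guest out.
 */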
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

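/*
 * Per-cpu buffer for batching KVM_HC_MMU_OP hypercalls while in lazy
 * MMU mode; it is flushed back to the host in kvm_leave_lazy_mmu().
 */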
struct kvm_para_state {
        u8 mmu_queue[MMU_QUEUE_SIZE];
        int mmu_queue_len;
};

static DEFINE_PER_CPU(struct kvm_para_state, para_state);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);

static struct kvm_para_state *kvm_para_state(void)
{
        return &per_cpu(para_state, raw_smp_processor_id());
}

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

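/*
 * Tasks waiting for an async page fault to complete are parked in a
 * small hash table, keyed by the token the host reports through CR2.
 */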
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        wait_queue_head_t wq;
        u32 token;
        int cpu;
        bool halted;
        struct mm_struct *mm;
};

static struct kvm_task_sleep_head {
        spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

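/*
 * Sleep until the host signals that the page for @token is resident
 * again.  If the PAGE_READY wake-up raced ahead of the fault, a dummy
 * entry already sits in the hash and we return at once.  Contexts that
 * must not schedule (idle cpu, elevated preempt count) halt instead of
 * sleeping and are kicked awake with a reschedule IPI.
 */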
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);
        int cpu, idle;

        cpu = get_cpu();
        idle = idle_cpu(cpu);
        put_cpu();

        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                spin_unlock(&b->lock);
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.mm = current->active_mm;
        n.halted = idle || preempt_count() > 1;
        atomic_inc(&n.mm->mm_count);
        init_waitqueue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        native_safe_halt();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_wait(&n.wq, &wait);

        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

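/*
 * Wake one waiter.  A NULL ->mm marks a dummy entry that was queued by
 * kvm_async_pf_task_wake() before the fault arrived; the faulting side
 * frees it, so there is nothing to wake here.
 */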
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (!n->mm)
                return;
        mmdrop(n->mm);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (waitqueue_active(&n->wq))
                wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                spin_unlock(&b->lock);
        }
}

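/*
 * Handle a PAGE_READY notification from the host.  Token ~0 broadcasts
 * a wake-up to every waiter on this cpu.  For any other token, wake the
 * matching sleeper or, if the notification beat the page fault itself,
 * leave a dummy entry behind for kvm_async_pf_task_wait() to find.
 */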
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add dummy entry for the token.
                 */
                n = kmalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while other cpu
                         * handles async PF.
                         */
                        spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                n->mm = NULL;
                init_waitqueue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

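/*
 * Read the reason word the host stored in the shared apf_reason slot
 * and clear it, so the next notification can be told apart from a
 * stale one.
 */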
u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__get_cpu_var(apf_reason).enabled) {
                reason = __get_cpu_var(apf_reason).reason;
                __get_cpu_var(apf_reason).reason = 0;
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

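/*
 * Async-PF aware #PF handler, installed by kvm_apf_trap_init().  A zero
 * reason means this is an ordinary page fault and goes to
 * do_page_fault(); for the two async reasons, CR2 carries the host's
 * token rather than a faulting address.
 */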
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                kvm_async_pf_task_wait((u32)read_cr2());
                break;
        case KVM_PV_REASON_PAGE_READY:
                kvm_async_pf_task_wake((u32)read_cr2());
                break;
        }
}

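/*
 * Hand a buffer of MMU ops to the host.  The hypercall returns how many
 * bytes were consumed, so keep submitting until the buffer is drained.
 */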
static void kvm_mmu_op(void *buffer, unsigned len)
{
        int r;
        unsigned long a1, a2;

        do {
                a1 = __pa(buffer);
                a2 = 0; /* on i386 __pa() always returns <4G */
                r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
                buffer += r;
                len -= r;
        } while (len);
}

static void mmu_queue_flush(struct kvm_para_state *state)
{
        if (state->mmu_queue_len) {
                kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
                state->mmu_queue_len = 0;
        }
}

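/*
 * In lazy MMU mode, ops accumulate in the per-cpu queue and are
 * submitted as one batch; outside lazy mode they go to the host
 * immediately.
 */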
static void kvm_deferred_mmu_op(void *buffer, int len)
{
        struct kvm_para_state *state = kvm_para_state();

        if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
                kvm_mmu_op(buffer, len);
                return;
        }
        if (state->mmu_queue_len + len > sizeof state->mmu_queue)
                mmu_queue_flush(state);
        memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
        state->mmu_queue_len += len;
}

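/*
 * Queue a WRITE_PTE op for the host.  With CONFIG_HIGHPTE the pte page
 * may live in highmem, where __pa() is not valid, so the physical
 * address is derived from the kmap'ed page instead.
 */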
static void kvm_mmu_write(void *dest, u64 val)
{
        __u64 pte_phys;
        struct kvm_mmu_op_write_pte wpte;

#ifdef CONFIG_HIGHPTE
        struct page *page;
        unsigned long dst = (unsigned long) dest;

        page = kmap_atomic_to_page(dest);
        pte_phys = page_to_pfn(page);
        pte_phys <<= PAGE_SHIFT;
        pte_phys += (dst & ~(PAGE_MASK));
#else
        pte_phys = (unsigned long)__pa(dest);
#endif
        wpte.header.op = KVM_MMU_OP_WRITE_PTE;
        wpte.pte_val = val;
        wpte.pte_phys = pte_phys;

        kvm_deferred_mmu_op(&wpte, sizeof wpte);
}

/*
 * We only need to hook operations that are MMU writes. We hook these so that
 * we can use lazy MMU mode to batch these operations. We could probably
 * improve the performance of the host code if we used some of the information
 * here to simplify processing of batched writes.
 */
static void kvm_set_pte(pte_t *ptep, pte_t pte)
{
        kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pte)
{
        kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        kvm_mmu_write(pmdp, pmd_val(pmd));
}

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_pte_clear(struct mm_struct *mm,
                          unsigned long addr, pte_t *ptep)
{
        kvm_mmu_write(ptep, 0);
}

static void kvm_pmd_clear(pmd_t *pmdp)
{
        kvm_mmu_write(pmdp, 0);
}
#endif

static void kvm_set_pud(pud_t *pudp, pud_t pud)
{
        kvm_mmu_write(pudp, pud_val(pud));
}

#if PAGETABLE_LEVELS == 4
static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        kvm_mmu_write(pgdp, pgd_val(pgd));
}
#endif
#endif /* PAGETABLE_LEVELS >= 3 */

static void kvm_flush_tlb(void)
{
        struct kvm_mmu_op_flush_tlb ftlb = {
                .header.op = KVM_MMU_OP_FLUSH_TLB,
        };

        kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
}

static void kvm_release_pt(unsigned long pfn)
{
        struct kvm_mmu_op_release_pt rpt = {
                .header.op = KVM_MMU_OP_RELEASE_PT,
                .pt_phys = (u64)pfn << PAGE_SHIFT,
        };

        kvm_mmu_op(&rpt, sizeof rpt);
}

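/*
 * Lazy MMU hooks: entering is pure bookkeeping; leaving flushes any
 * batched MMU ops back to the host.
 */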
static void kvm_enter_lazy_mmu(void)
{
        paravirt_enter_lazy_mmu();
}

static void kvm_leave_lazy_mmu(void)
{
        struct kvm_para_state *state = kvm_para_state();

        mmu_queue_flush(state);
        paravirt_leave_lazy_mmu();
}

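/*
 * Install paravirt hooks for each feature the host advertises through
 * the KVM CPUID feature bits.
 */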
static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";
        pv_info.paravirt_enabled = 1;

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

        if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
                pv_mmu_ops.set_pte = kvm_set_pte;
                pv_mmu_ops.set_pte_at = kvm_set_pte_at;
                pv_mmu_ops.set_pmd = kvm_set_pmd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
                pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
                pv_mmu_ops.pte_clear = kvm_pte_clear;
                pv_mmu_ops.pmd_clear = kvm_pmd_clear;
#endif
                pv_mmu_ops.set_pud = kvm_set_pud;
#if PAGETABLE_LEVELS == 4
                pv_mmu_ops.set_pgd = kvm_set_pgd;
#endif
#endif
                pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
                pv_mmu_ops.release_pte = kvm_release_pt;
                pv_mmu_ops.release_pmd = kvm_release_pt;
                pv_mmu_ops.release_pud = kvm_release_pt;

                pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
                pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
        }
#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

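/*
 * Arm async PF on this cpu: tell the host the physical address of the
 * per-cpu apf_reason word it should write fault reasons into.
 */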
void __cpuinit kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = __pa(&__get_cpu_var(apf_reason));

                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
                __get_cpu_var(apf_reason).enabled = 1;
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }
}

static void kvm_pv_disable_apf(void *unused)
{
        if (!__get_cpu_var(apf_reason).enabled)
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __get_cpu_var(apf_reason).enabled = 0;

        printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_disable_apf, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

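/*
 * On SMP, async PF must be (re)armed on each cpu as it comes online and
 * disarmed before it goes down; the hotplug notifier below handles both
 * directions.
 */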
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        WARN_ON(kvm_register_clock("primary cpu clock"));
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
}

static void kvm_guest_cpu_online(void *dummy)
{
        kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
        kvm_pv_disable_apf(NULL);
        apf_task_wake_all();
}

static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        int cpu = (unsigned long)hcpu;
        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_notify,
};
#endif

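/*
 * Route the page fault vector (14) to the async-PF aware entry point;
 * hooked in below as x86_init.irqs.trap_init when the host advertises
 * KVM_FEATURE_ASYNC_PF.
 */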
static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, &async_page_fault);
}

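/*
 * Guest-side initialization: set up paravirt ops, the reboot notifier,
 * the async-PF sleeper hash, and per-cpu enabling (directly on UP, via
 * the CPU notifier on SMP).
 */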
void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        register_cpu_notifier(&kvm_cpu_notifier);
#else
        kvm_guest_cpu_init();
#endif
}