/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>

#define MMU_QUEUE_SIZE 1024

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

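/*
 * Per-cpu buffer used to batch KVM_HC_MMU_OP hypercalls while the guest
 * is in lazy MMU mode; queued operations are flushed in a single
 * hypercall once the buffer fills up or lazy mode is left.
 */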
struct kvm_para_state {
        u8 mmu_queue[MMU_QUEUE_SIZE];
        int mmu_queue_len;
};

static DEFINE_PER_CPU(struct kvm_para_state, para_state);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);

static struct kvm_para_state *kvm_para_state(void)
{
        return &per_cpu(para_state, raw_smp_processor_id());
}

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

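/*
 * Async page faults are identified by a host-assigned token. A task
 * waiting for a page to be brought in sleeps on a wait queue kept in
 * a small hash table keyed by that token.
 */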
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        wait_queue_head_t wq;
        u32 token;
        int cpu;
        bool halted;
        struct mm_struct *mm;
};

static struct kvm_task_sleep_head {
        spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

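/*
 * Called from the page fault handler when the host signals
 * KVM_PV_REASON_PAGE_NOT_PRESENT: sleep until the matching PAGE_READY
 * token arrives. When scheduling is not safe (the fault hit the idle
 * task or a nested preempt-off context), spin in halt instead of
 * sleeping.
 */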
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);
        int cpu, idle;

        cpu = get_cpu();
        idle = idle_cpu(cpu);
        put_cpu();

        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake-up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                spin_unlock(&b->lock);
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.mm = current->active_mm;
        n.halted = idle || preempt_count() > 1;
        atomic_inc(&n.mm->mm_count);
        init_waitqueue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        native_safe_halt();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_wait(&n.wq, &wait);

        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

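/*
 * Wake up a task sleeping on @n. A NULL mm identifies a dummy entry
 * (the wake-up arrived before the fault), so there is no task to wake.
 */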
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (!n->mm)
                return;
        mmdrop(n->mm);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (waitqueue_active(&n->wq))
                wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                spin_unlock(&b->lock);
        }
}

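/*
 * Handle KVM_PV_REASON_PAGE_READY: wake the task sleeping on @token.
 * A token of ~0 broadcasts to every sleeper on this CPU. If the
 * wake-up beats the fault, leave a dummy entry for the waiter to find.
 */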
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * Async PF was not yet handled.
                 * Add a dummy entry for the token.
                 */
                n = kmalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while another cpu
                         * handles the async PF.
                         */
                        spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                n->mm = NULL;
                init_waitqueue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

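/*
 * Fetch and clear the fault reason from the per-cpu area shared with
 * the host. Returns 0 when async PF is not enabled or no reason is
 * pending, i.e. for an ordinary page fault.
 */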
u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__get_cpu_var(apf_reason).enabled) {
                reason = __get_cpu_var(apf_reason).reason;
                __get_cpu_var(apf_reason).reason = 0;
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

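/*
 * Replacement #PF handler (installed by kvm_apf_trap_init() below). A
 * reason of zero is a genuine page fault and is passed on to
 * do_page_fault(); for the paravirtual reasons CR2 carries the async
 * PF token rather than a faulting address.
 */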
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                kvm_async_pf_task_wait((u32)read_cr2());
                break;
        case KVM_PV_REASON_PAGE_READY:
                kvm_async_pf_task_wake((u32)read_cr2());
                break;
        }
}

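/*
 * Issue a KVM_HC_MMU_OP hypercall. The hypercall returns the number of
 * bytes it consumed from the buffer, so loop until the whole buffer
 * has been processed.
 */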
static void kvm_mmu_op(void *buffer, unsigned len)
{
        int r;
        unsigned long a1, a2;

        do {
                a1 = __pa(buffer);
                a2 = 0;   /* on i386 __pa() always returns <4G */
                r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
                buffer += r;
                len -= r;
        } while (len);
}

static void mmu_queue_flush(struct kvm_para_state *state)
{
        if (state->mmu_queue_len) {
                kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
                state->mmu_queue_len = 0;
        }
}

static void kvm_deferred_mmu_op(void *buffer, int len)
{
        struct kvm_para_state *state = kvm_para_state();

        if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
                kvm_mmu_op(buffer, len);
                return;
        }
        if (state->mmu_queue_len + len > sizeof state->mmu_queue)
                mmu_queue_flush(state);
        memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
        state->mmu_queue_len += len;
}

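/*
 * Tell the host to write @val into the pte at @dest. The host needs a
 * physical address; with CONFIG_HIGHPTE the pte page may be mapped via
 * kmap_atomic(), so __pa() cannot be used on it directly.
 */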
static void kvm_mmu_write(void *dest, u64 val)
{
        __u64 pte_phys;
        struct kvm_mmu_op_write_pte wpte;

#ifdef CONFIG_HIGHPTE
        struct page *page;
        unsigned long dst = (unsigned long) dest;

        page = kmap_atomic_to_page(dest);
        pte_phys = page_to_pfn(page);
        pte_phys <<= PAGE_SHIFT;
        pte_phys += (dst & ~(PAGE_MASK));
#else
        pte_phys = (unsigned long)__pa(dest);
#endif
        wpte.header.op = KVM_MMU_OP_WRITE_PTE;
        wpte.pte_val = val;
        wpte.pte_phys = pte_phys;

        kvm_deferred_mmu_op(&wpte, sizeof wpte);
}

/*
 * We only need to hook operations that are MMU writes.  We hook these so
 * that we can use lazy MMU mode to batch these operations.  We could
 * probably improve the performance of the host code if we used some of
 * the information here to simplify processing of batched writes.
 */
static void kvm_set_pte(pte_t *ptep, pte_t pte)
{
        kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pte)
{
        kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        kvm_mmu_write(pmdp, pmd_val(pmd));
}

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_pte_clear(struct mm_struct *mm,
                          unsigned long addr, pte_t *ptep)
{
        kvm_mmu_write(ptep, 0);
}

static void kvm_pmd_clear(pmd_t *pmdp)
{
        kvm_mmu_write(pmdp, 0);
}
#endif

static void kvm_set_pud(pud_t *pudp, pud_t pud)
{
        kvm_mmu_write(pudp, pud_val(pud));
}

#if PAGETABLE_LEVELS == 4
static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        kvm_mmu_write(pgdp, pgd_val(pgd));
}
#endif
#endif /* PAGETABLE_LEVELS >= 3 */

static void kvm_flush_tlb(void)
{
        struct kvm_mmu_op_flush_tlb ftlb = {
                .header.op = KVM_MMU_OP_FLUSH_TLB,
        };

        kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
}

static void kvm_release_pt(unsigned long pfn)
{
        struct kvm_mmu_op_release_pt rpt = {
                .header.op = KVM_MMU_OP_RELEASE_PT,
                .pt_phys = (u64)pfn << PAGE_SHIFT,
        };

        kvm_mmu_op(&rpt, sizeof rpt);
}

static void kvm_enter_lazy_mmu(void)
{
        paravirt_enter_lazy_mmu();
}

static void kvm_leave_lazy_mmu(void)
{
        struct kvm_para_state *state = kvm_para_state();

        mmu_queue_flush(state);
        paravirt_leave_lazy_mmu();
}

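/*
 * Install the KVM-specific paravirt operations for the features the
 * hypervisor advertises through the KVM CPUID leaves.
 */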
static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";
        pv_info.paravirt_enabled = 1;

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

        if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
                pv_mmu_ops.set_pte = kvm_set_pte;
                pv_mmu_ops.set_pte_at = kvm_set_pte_at;
                pv_mmu_ops.set_pmd = kvm_set_pmd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
                pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
                pv_mmu_ops.pte_clear = kvm_pte_clear;
                pv_mmu_ops.pmd_clear = kvm_pmd_clear;
#endif
                pv_mmu_ops.set_pud = kvm_set_pud;
#if PAGETABLE_LEVELS == 4
                pv_mmu_ops.set_pgd = kvm_set_pgd;
#endif
#endif
                pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
                pv_mmu_ops.release_pte = kvm_release_pt;
                pv_mmu_ops.release_pmd = kvm_release_pt;
                pv_mmu_ops.release_pud = kvm_release_pt;

                pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
                pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
        }
#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

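/*
 * Enable async PF delivery on this CPU by handing the host the
 * physical address of the per-cpu apf_reason area through
 * MSR_KVM_ASYNC_PF_EN.
 */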
void __cpuinit kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = __pa(&__get_cpu_var(apf_reason));

                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
                __get_cpu_var(apf_reason).enabled = 1;
                printk(KERN_INFO "KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }
}

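/*
 * Tell the host to stop writing to this CPU's apf_reason area. Called
 * on reboot and CPU offline so that the host does not keep writing to
 * memory that is about to be reused.
 */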
static void kvm_pv_disable_apf(void *unused)
{
        if (!__get_cpu_var(apf_reason).enabled)
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __get_cpu_var(apf_reason).enabled = 0;

        printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_disable_apf, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        WARN_ON(kvm_register_clock("primary cpu clock"));
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
}

static void kvm_guest_cpu_online(void *dummy)
{
        kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
        kvm_pv_disable_apf(NULL);
        apf_task_wake_all();
}

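/*
 * CPU hotplug: (re)enable async PF when a CPU comes online, and
 * disable it, waking any remaining sleepers, before a CPU goes down.
 */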
static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_notify,
};
#endif

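/* Route #PF (vector 14) through the async_page_fault entry stub. */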
static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, &async_page_fault);
}

void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        register_cpu_notifier(&kvm_cpu_notifier);
#else
        kvm_guest_cpu_init();
#endif
}