/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

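/*
 * Per-cpu memory shared with the host: the async PF reason word and the
 * steal time record.  Both are registered with the host via MSR writes
 * in kvm_guest_cpu_init() and kvm_register_steal_time() below.
 */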
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

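/*
 * Tasks sleeping on an async PF token are kept in a small hash table,
 * keyed by hash_32(token), with one lock per bucket.
 */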
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

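/*
 * Sleep until the host signals that the page identified by @token has
 * been brought in.  If the "page ready" wakeup raced ahead of us, the
 * dummy node left behind by kvm_async_pf_task_wake() is found here and
 * we return immediately.  Contexts that cannot schedule (the idle task,
 * or preempt_count() > 1) poll with halt instead of sleeping on the
 * waitqueue.
 */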
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);

	rcu_irq_enter();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			rcu_irq_exit();
			native_safe_halt();
			rcu_irq_enter();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

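/*
 * Wake the task sleeping on @token; a token of ~0 wakes all sleepers on
 * this CPU.  If no sleeper is found the wakeup arrived before the fault
 * was processed, so a dummy node is hashed for kvm_async_pf_task_wait()
 * to find and consume.
 */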
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__get_cpu_var(apf_reason).enabled) {
		reason = __get_cpu_var(apf_reason).reason;
		__get_cpu_var(apf_reason).reason = 0;
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

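/*
 * Paravirtual #PF entry point.  The per-cpu apf_reason slot, written by
 * the host, distinguishes async PF events from ordinary page faults:
 * PAGE_NOT_PRESENT puts the current task to sleep, PAGE_READY wakes it,
 * and anything else is handled as a regular fault.
 */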
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

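/*
 * PV EOI state word, shared with the hypervisor.  While KVM_PV_EOI_BIT
 * is set, an EOI can be signalled by clearing the bit instead of
 * writing the APIC EOI register, avoiding an exit to the host.
 */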
static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}

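/*
 * Per-cpu guest setup: registers the async PF shared area, the PV EOI
 * word and the steal time record with the host through their
 * respective MSRs.  Called on each CPU as it is brought up.
 */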
void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__get_cpu_var(apf_reason).enabled = 1;
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__get_cpu_var(kvm_apic_eoi) = 0;
		pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

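/*
 * Read the steal time published by the host for @cpu.  The version
 * field acts as a seqcount: the host increments it before and after
 * each update, so an odd value, or one that changed while we were
 * reading, means we raced with an update and must retry.
 */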
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	WARN_ON(kvm_register_clock("primary cpu clock"));
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

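/*
 * Route vector 14 (#PF) through the async_page_fault stub so that
 * do_async_page_fault() sees host-injected async PF notifications.
 */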
static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, &async_page_fault);
}

void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

enum kvm_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS	30

static struct kvm_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	u8 ret;
	u8 old;

	old = ACCESS_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one fellow resets the stat */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
	return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index;

	index = ilog2(delta);
	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta;

	delta = sched_clock() - start;
	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}

static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

struct dentry *kvm_init_debugfs(void)
{
	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
	if (!d_kvm_debug)
		printk(KERN_WARNING "Could not create 'kvm-guest' debugfs directory\n");

	return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
	struct dentry *d_kvm;

	d_kvm = kvm_init_debugfs();
	if (d_kvm == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else  /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_KVM_DEBUG_FS */

struct kvm_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);

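/*
 * Slowpath for a contended ticket lock: publish the lock and the ticket
 * we want in klock_waiting and waiting_cpus, then halt until the
 * unlocker spots us and issues a KVM_HC_KICK_CPU hypercall.
 */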
static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	struct kvm_lock_waiting *w;
	int cpu;
	u64 start;
	unsigned long flags;

	if (in_nmi())
		return;

	w = &__get_cpu_var(klock_waiting);
	cpu = smp_processor_id();
	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially set-up state.
	 */
	local_irq_save(flags);

	/*
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	add_stats(TAKEN_SLOW, 1);

	/*
	 * This uses set_bit, which is atomic but we should not rely on its
	 * reordering guarantees. So a barrier is needed after this call.
	 */
	cpumask_set_cpu(cpu, &waiting_cpus);

	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/*
	 * Check again to make sure the lock didn't become free while
	 * we weren't looking.
	 */
	if (ACCESS_ONCE(lock->tickets.head) == want) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/*
	 * Halt until it's our turn and we are kicked. Note that we do safe
	 * halt for the irq-enabled case to avoid hanging when the lock info
	 * is overwritten in the irq spinlock slowpath and no spurious
	 * interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;
	local_irq_restore(flags);
	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

/* Kick a vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);
	for_each_cpu(cpu, &waiting_cpus) {
		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
		if (ACCESS_ONCE(w->lock) == lock &&
		    ACCESS_ONCE(w->want) == ticket) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			kvm_kick_cpu(cpu);
			break;
		}
	}
}

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
	pv_lock_ops.unlock_kick = kvm_unlock_kick;
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */