// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 * Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>

DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

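/*
 * Queue @n on the sleeper hash bucket for @token. Returns false if a
 * dummy wake-up node for the token is already queued (the "page ready"
 * notification raced ahead of the "page not present" fault); the dummy
 * is then consumed and the caller must not sleep.
 */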
static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *e;

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		raw_spin_unlock(&b->lock);
		kfree(e);
		return false;
	}

	n->token = token;
	n->cpu = smp_processor_id();
	init_swait_queue_head(&n->wq);
	hlist_add_head(&n->link, &b->list);
	raw_spin_unlock(&b->lock);
	return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token: Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
	struct kvm_task_sleep_node n;
	DECLARE_SWAITQUEUE(wait);

	lockdep_assert_irqs_disabled();

	if (!kvm_async_pf_queue_task(token, &n))
		return;

	for (;;) {
		prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		local_irq_enable();
		schedule();
		local_irq_disable();
	}
	finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		struct kvm_task_sleep_node *n;
		struct hlist_node *p, *next;

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			n = hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

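/*
 * Wake the task sleeping on @token, or queue a dummy node if the wake-up
 * arrives before the fault side got a chance to sleep. A token of ~0
 * broadcasts the wake-up to every sleeper on this CPU.
 */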
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while another CPU
			 * handles the async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else {
		apf_task_wake_one(n);
	}
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

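/*
 * Read and clear the pending async-PF flags from this CPU's shared
 * apf_reason page. Marked noinstr because it is called from the early
 * #PF entry path, before a safe instrumentation context is established.
 */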
noinstr u32 kvm_read_and_reset_apf_flags(void)
{
	u32 flags = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		flags = __this_cpu_read(apf_reason.flags);
		__this_cpu_write(apf_reason.flags, 0);
	}

	return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);

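/*
 * Called from the #PF entry code to distinguish host-injected async page
 * faults from real ones. Returns true if the fault was an async PF and
 * has been fully handled here, false if normal #PF handling should
 * continue.
 */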
noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	u32 reason = kvm_read_and_reset_apf_flags();
	bool rcu_exit;

	switch (reason) {
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
	case KVM_PV_REASON_PAGE_READY:
		break;
	default:
		return false;
	}

	rcu_exit = idtentry_enter_cond_rcu(regs);
	instrumentation_begin();

	/*
	 * If the host managed to inject an async #PF into an interrupt
	 * disabled region, then die hard as this is not going to end well
	 * and the host side is seriously broken.
	 */
	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
		panic("Host injected async #PF in interrupt disabled region\n");

	if (reason == KVM_PV_REASON_PAGE_NOT_PRESENT) {
		if (unlikely(!(user_mode(regs))))
			panic("Host injected async #PF in kernel mode\n");
		/* Page is swapped out by the host. */
		kvm_async_pf_task_wait_schedule(token);
	} else {
		kvm_async_pf_task_wake(token);
	}

	instrumentation_end();
	idtentry_exit_cond_rcu(regs, rcu_exit);
	return true;
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU, so
	 * there's no need for a lock or memory barriers.
	 * An optimization barrier is implied in the APIC write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

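/*
 * Per-CPU guest setup: hand the hypervisor the physical addresses of the
 * shared async-PF, PV-EOI and steal-time areas via their respective MSRs.
 * Runs on the boot CPU during SMP bringup and on each CPU brought online
 * afterwards.
 */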
static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa;

		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
		pa |= KVM_ASYNC_PF_ENABLED;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec, since
	 * MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's memory.
	 * The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

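/*
 * Read the steal time accumulated for @cpu from the shared steal_time
 * record. The version field acts as a seqcount: the host bumps it to an
 * odd value before updating and to an even value afterwards, so retry
 * while it is odd or changed across the read.
 */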
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged later will have their per-cpu variables already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

#ifdef CONFIG_SMP

static bool pv_ipi_supported(void)
{
	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
}

static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)

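/*
 * Send an IPI to every CPU in @mask with as few hypercalls as possible.
 * Destinations are encoded as a bitmap of APIC IDs relative to a sliding
 * base (min): e.g. APIC IDs 3, 5 and 64 with min = 3 become bits 0, 2
 * and 61 of ipi_bitmap. Whenever the next APIC ID does not fit in the
 * KVM_IPI_CLUSTER_SIZE window, the accumulated bitmap is flushed via
 * KVM_HC_SEND_IPI and a new window is started.
 */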
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
	const struct cpumask *local_mask;

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	local_mask = new_mask;
	__send_ipi_mask(local_mask, vector);
}

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	pr_info("KVM setup pv IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}

static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		static_branch_disable(&virt_spin_lock_key);
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

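/*
 * PV TLB flush: instead of sending flush IPIs to preempted vCPUs (which
 * cannot run and process them anyway), mark them with KVM_VCPU_FLUSH_TLB
 * in their steal-time record so the host flushes their TLB on the next
 * guest entry, and IPI only the vCPUs that are actually running.
 */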
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * Flush only the vCPUs that are actually running; for preempted
	 * vCPUs, queue the flush to happen on their next guest entry.
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}

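/*
 * Main guest-side initialization: wires up the paravirt hooks for
 * everything the host advertises (steal time, PV TLB flush, PV EOI,
 * async PF) and registers the CPU hotplug and reboot callbacks.
 */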
static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_ops.time.steal_clock = kvm_steal_clock;
	}

	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
		pr_info("KVM setup pv remote TLB flush\n");
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf)
		static_branch_enable(&kvm_async_pf_enabled);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (pv_sched_yield_supported()) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("KVM setup pv sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

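/*
 * Locate the KVM CPUID leaf range by probing for the "KVMKVMKVM\0\0\0"
 * hypervisor signature. Returns the base leaf, or 0 when not running on
 * KVM (or on CPUs that predate CPUID levels).
 */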
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
}

static void __init kvm_init_platform(void)
{
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
	.init.init_platform	= kvm_init_platform,
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

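/*
 * Allocate the per-CPU cpumasks used by the PV TLB-flush and PV-IPI
 * paths. Runs as an arch_initcall, late enough in boot for the slab
 * allocator to be available (kvm_guest_init() runs too early for
 * dynamic allocation).
 */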
static __init int kvm_alloc_cpumask(void)
{
	int cpu;
	bool alloc = false;

	if (!kvm_para_available() || nopv)
		return 0;

	if (pv_tlb_flush_supported())
		alloc = true;

#if defined(CONFIG_SMP)
	if (pv_ipi_supported())
		alloc = true;
#endif

	if (alloc)
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a CPU by its APIC ID. Used to wake up a halted vCPU. */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

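/*
 * Halt the vCPU while waiting for its qspinlock byte to change, instead
 * of burning cycles spinning. The lock holder wakes us via kvm_kick_cpu().
 */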
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * Halt until it's our turn and we are kicked. Note that we do a
	 * safe halt for the irqs-enabled case, to avoid hanging when the
	 * lock info is overwritten in the irq spinlock slowpath and no
	 * spurious interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64, to avoid saving and restoring 8
 * 64-bit registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		return;

	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1)
		return;

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("kvm: host does not support poll control\n");
		pr_err_once("kvm: host upgrade recommended\n");
		return;
	}

	/* Enabling guest halt poll disables host halt poll. */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disabling guest halt poll re-enables host halt poll. */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif
917#endif