// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#define pr_fmt(fmt) "kvm-guest: " fmt

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <linux/syscore_ops.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/ptrace.h>
#include <asm/svm.h>

DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

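/*
 * These per-cpu areas are shared with the hypervisor, so under SEV they
 * must live in memory mapped decrypted (see sev_map_percpu_data() below).
 */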
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

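/*
 * Tasks that hit an async "page not present" fault sleep in a small hash
 * table keyed by the fault token, so the "page ready" notification can
 * find the right waiter without scanning all of them.
 */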
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *e;

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wakeup was delivered ahead of PF */
		hlist_del(&e->link);
		raw_spin_unlock(&b->lock);
		kfree(e);
		return false;
	}

	n->token = token;
	n->cpu = smp_processor_id();
	init_swait_queue_head(&n->wq);
	hlist_add_head(&n->link, &b->list);
	raw_spin_unlock(&b->lock);
	return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token:	Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
	struct kvm_task_sleep_node n;
	DECLARE_SWAITQUEUE(wait);

	lockdep_assert_irqs_disabled();

	if (!kvm_async_pf_queue_task(token, &n))
		return;

	for (;;) {
		prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		local_irq_enable();
		schedule();
		local_irq_disable();
	}
	finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		struct kvm_task_sleep_node *n;
		struct hlist_node *p, *next;

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			n = hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

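/*
 * kvm_async_pf_task_wake - Wake the task sleeping on @token.  If the wakeup
 * arrives before the task managed to queue itself, leave a dummy entry so
 * the sleep path returns immediately.  A token of ~0 wakes all waiters on
 * this CPU.
 */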
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else {
		apf_task_wake_one(n);
	}
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

noinstr u32 kvm_read_and_reset_apf_flags(void)
{
	u32 flags = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		flags = __this_cpu_read(apf_reason.flags);
		__this_cpu_write(apf_reason.flags, 0);
	}

	return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);

noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	u32 flags = kvm_read_and_reset_apf_flags();
	irqentry_state_t state;

	if (!flags)
		return false;

	state = irqentry_enter(regs);
	instrumentation_begin();

	/*
	 * If the host managed to inject an async #PF into an interrupt
	 * disabled region, then die hard as this is not going to end well
	 * and the host side is seriously broken.
	 */
	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
		panic("Host injected async #PF in interrupt disabled region\n");

	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		if (unlikely(!(user_mode(regs))))
			panic("Host injected async #PF in kernel mode\n");
		/* Page is swapped out by the host. */
		kvm_async_pf_task_wait_schedule(token);
	} else {
		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
	}

	instrumentation_end();
	irqentry_exit(regs, state);
	return true;
}

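/*
 * Interrupt handler for 'page ready' notifications, delivered through
 * HYPERVISOR_CALLBACK_VECTOR.  The token is read from apf_reason and
 * acknowledged back to the host via MSR_KVM_ASYNC_PF_ACK.
 */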
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	u32 token;

	ack_APIC_irq();

	inc_irq_stat(irq_hv_callback_count);

	if (__this_cpu_read(apf_reason.enabled)) {
		token = __this_cpu_read(apf_reason.token);
		kvm_async_pf_task_wake(token);
		__this_cpu_write(apf_reason.token, 0);
		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
	}

	set_irq_regs(old_regs);
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
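	/*
	 * The boot-time IO-APIC timer interrupt check is both slow and
	 * unreliable in a virtualized environment, so skip it.
	 */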
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("stealtime: cpu %d, msr %llx\n", cpu,
		(unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		u64 pa;

		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		pr_info("setup async PF for cpu %d\n", smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	pr_info("disable async PF for cpu %d\n", smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged later will have their per-cpu variables already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

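/* Tear down per-CPU paravirt state on CPU offline or system suspend. */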
static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_init();
	local_irq_restore(flags);
	return 0;
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

static bool pv_ipi_supported(void)
{
	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
}

static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

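	/*
	 * Batch APIC IDs into a bitmap covering a window of up to
	 * KVM_IPI_CLUSTER_SIZE IDs starting at 'min'.  When the next ID
	 * does not fit the current window, flush the batch with a
	 * KVM_HC_SEND_IPI hypercall and start a new window.
	 */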
	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
				  ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
			  ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
	const struct cpumask *local_mask;

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	local_mask = new_mask;
	__send_ipi_mask(local_mask, vector);
}

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	pr_info("setup PV IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}

static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs. And
	 * queue flush_on_enter for pre-empted vCPUs
	 */
	for_each_cpu(cpu, flushmask) {
		/*
		 * The local vCPU is never preempted, so we do not explicitly
		 * skip check for local vCPU - it will never be cleared from
		 * flushmask.
		 */
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
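			/*
			 * Ask the host to flush on its next VM-entry instead
			 * of IPIing a preempted vCPU; on success the CPU can
			 * be dropped from the flush mask.
			 */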
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_multi(flushmask, info);
}

static __init int kvm_alloc_cpumask(void)
{
	int cpu;

	if (!kvm_para_available() || nopv)
		return 0;

	if (pv_tlb_flush_supported() || pv_ipi_supported())
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_offline();
	local_irq_restore(flags);
	return 0;
}

#endif

static int kvm_suspend(void)
{
	kvm_guest_cpu_offline();

	return 0;
}

static void kvm_resume(void)
{
	kvm_cpu_online(raw_smp_processor_id());
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend	= kvm_suspend,
	.resume		= kvm_resume,
};

static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		static_call_update(pv_steal_clock, kvm_steal_clock);
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		static_branch_enable(&kvm_async_pf_enabled);
		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
	}

#ifdef CONFIG_SMP
	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
		pr_info("KVM setup pv remote TLB flush\n");
	}

	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (pv_sched_yield_supported()) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("setup PV sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	register_syscore_ops(&kvm_syscore_ops);

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#ifdef CONFIG_SMP
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
}

static bool __init kvm_msi_ext_dest_id(void)
{
	return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
}

static void __init kvm_init_platform(void)
{
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}

#if defined(CONFIG_AMD_MEM_ENCRYPT)
static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* RAX and CPL are already in the GHCB */
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
}

static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* No checking of the return state needed */
	return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name				= "KVM",
	.detect				= kvm_detect,
	.type				= X86_HYPER_KVM,
	.init.guest_late_init		= kvm_guest_init,
	.init.x2apic_available		= kvm_para_available,
	.init.msi_ext_dest_id		= kvm_msi_ext_dest_id,
	.init.init_platform		= kvm_init_platform,
#if defined(CONFIG_AMD_MEM_ENCRYPT)
	.runtime.sev_es_hcall_prepare	= kvm_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= kvm_sev_es_hcall_finish,
#endif
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
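	/* Halting in NMI context would be unsafe; let the caller spin. */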
	if (in_nmi())
		return;

	/*
	 * Halt until it's our turn and we are kicked. Note that we do safe
	 * halt for the irq enabled case to avoid hang when lock info is
	 * overwritten in the irq spinlock slowpath and no spurious interrupt
	 * occurs to save us.
	 */
	if (irqs_disabled()) {
		if (READ_ONCE(*ptr) == val)
			halt();
	} else {
		local_irq_disable();

		if (READ_ONCE(*ptr) == val)
			safe_halt();

		local_irq_enable();
	}
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/*
	 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
	 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
	 * preferred over native qspinlock when vCPU is preempted.
	 */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
		pr_info("PV spinlocks disabled, no host support\n");
		return;
	}

	/*
	 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
	 * are available.
	 */
	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
		pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
		goto out;
	}

	if (num_possible_cpus() == 1) {
		pr_info("PV spinlocks disabled, single CPU\n");
		goto out;
	}

	if (nopvspin) {
		pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
		goto out;
	}

	pr_info("PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
	/*
	 * When PV spinlock is enabled which is preferred over
	 * virt_spin_lock(), virt_spin_lock_key's value is meaningless.
	 * Just disable it anyway.
	 */
out:
	static_branch_disable(&virt_spin_lock_key);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("host does not support poll control\n");
		pr_err_once("host upgrade recommended\n");
		return;
	}

	/* Enabling guest halt poll disables host halt poll */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disabling guest halt poll enables host halt poll */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif