// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell. All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

bool __attribute__((weak))
kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
	return true;
}

static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}
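
/*
 * Usage sketch (illustrative only): a minimal userspace flow for a
 * resampling irqfd, e.g. for a level-triggered passthrough interrupt.
 * vm_fd, gsi and error handling are assumptions of the example.  The
 * trigger eventfd asserts the GSI; when the guest EOIs, irqfd_resampler_ack()
 * above de-asserts it and signals the resample eventfd so the caller can
 * re-check the device and re-trigger if the line is still asserted.
 *
 *	int trigger_fd  = eventfd(0, EFD_CLOEXEC);
 *	int resample_fd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd irqfd = {
 *		.fd         = trigger_fd,
 *		.resamplefd = resample_fd,
 *		.gsi        = gsi,
 *		.flags      = KVM_IRQFD_FLAG_RESAMPLE,
 *	};
 *
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *
 *	uint64_t one = 1, ack;
 *	write(trigger_fd, &one, sizeof(one));		// assert the level
 *	read(resample_fd, &ack, sizeof(ack));		// returns on guest EOI
 */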

static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	struct kvm *kvm = irqfd->kvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path. */
	synchronize_srcu(&kvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level,
				bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;
	int ret = 0;

	if (flags & EPOLLIN) {
		u64 cnt;
		eventfd_ctx_do_read(irqfd->eventfd, &cnt);

		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
		ret = 1;
	}

	if (flags & EPOLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long iflags;

		spin_lock_irqsave(&kvm->irqfds.lock, iflags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
	}

	return ret;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue_priority(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}

bool __attribute__((weak)) kvm_arch_irqfd_route_changed(
				struct kvm_kernel_irq_routing_entry *old,
				struct kvm_kernel_irq_routing_entry *new)
{
	return true;
}
#endif

static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	__poll_t events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	if (!kvm_arch_irqfd_allowed(kvm, args))
		return -EINVAL;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler),
					    GFP_KERNEL_ACCOUNT);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(f.file, &irqfd->pt);

	if (events & EPOLLIN)
		schedule_work(&irqfd->inject);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
		irqfd->consumer.start = kvm_arch_irq_bypass_start;
		ret = irq_bypass_register_consumer(&irqfd->consumer);
		if (ret)
			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
	}
#endif

	srcu_read_unlock(&kvm->irq_srcu, idx);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP
	 */
	fdput(f);
	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
					  link, srcu_read_lock_held(&kvm->irq_srcu))
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
				  link, srcu_read_lock_held(&kvm->irq_srcu))
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}
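
/*
 * Usage sketch (illustrative only): the plain, edge-style irqfd path
 * dispatched by kvm_irqfd() above.  vm_fd and gsi are assumptions of the
 * example; each write to the eventfd injects one edge on the GSI.
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd irqfd = { .fd = efd, .gsi = gsi };
 *
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);		// assign
 *
 *	uint64_t one = 1;
 *	write(efd, &one, sizeof(one));			// inject the interrupt
 *
 *	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);		// tear it down again
 */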

/*
 * This function is called as the kvm VM fd is being released. Shut down all
 * irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);

}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		/* Under irqfds.lock, so can read irq_entry safely */
		struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;
#endif

		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer &&
		    kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}
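
/*
 * Usage sketch (illustrative only): kvm_irq_routing_update() runs when
 * userspace replaces the routing table, roughly as below; vm_fd, gsi and
 * the MSI address/data values are assumptions of the example.  Every
 * registered irqfd then re-caches its routing entry under irqfds.lock.
 *
 *	struct {
 *		struct kvm_irq_routing hdr;
 *		struct kvm_irq_routing_entry entries[1];
 *	} r = {
 *		.hdr.nr = 1,
 *		.entries[0] = {
 *			.gsi   = gsi,
 *			.type  = KVM_IRQ_ROUTING_MSI,
 *			.u.msi = {
 *				.address_lo = msi_addr_lo,
 *				.address_hi = msi_addr_hi,
 *				.data       = msi_data,
 *			},
 *		},
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_GSI_ROUTING, &r);
 */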

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}
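
/*
 * Worked example (illustrative only): for a registration with addr == 0x1000,
 * len == 4 and a datamatch of 1, only a 4-byte write of the value 1 to exactly
 * 0x1000 is a hit.  The same registration with wildcard set matches any 4-byte
 * write to 0x1000 regardless of value, and with length == 0 it matches any
 * write to 0x1000 irrespective of size or value.
 */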

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{

	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL_ACCOUNT);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	struct kvm_io_bus        *bus;
	int                       ret = -ENOENT;
	bool                      wildcard;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		bus = kvm_get_bus(kvm, bus_idx);
		if (bus)
			bus->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}
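
/*
 * Usage sketch (illustrative only): registering a doorbell ioeventfd through
 * kvm_ioeventfd() above.  vm_fd and doorbell_gpa are assumptions of the
 * example; a matching guest write is then completed in the kernel and only
 * signals the eventfd, with no exit to userspace.
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_ioeventfd io = {
 *		.addr      = doorbell_gpa,	// MMIO address the guest writes
 *		.len       = 4,
 *		.fd        = efd,
 *		.datamatch = 1,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *	};
 *
 *	ioctl(vm_fd, KVM_IOEVENTFD, &io);
 *
 *	uint64_t cnt;
 *	read(efd, &cnt, sizeof(cnt));		// blocks until the guest kicks
 */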