/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

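/*
 * An ordinary irqfd pulses the GSI (assert then immediately de-assert)
 * to emulate an edge-triggered interrupt, while a resampler irqfd only
 * asserts the line; de-assertion is deferred to irqfd_resampler_ack()
 * once the guest EOIs the interrupt.
 */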
static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
			    false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
			    false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

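/*
 * Resamplers are not reference counted; the per-GSI object lives for as
 * long as its ->list of irqfds is non-empty.  The irqfd being removed
 * here is therefore responsible for tearing the resampler down (and
 * leaving the line de-asserted) when it is the last user of the GSI.
 */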
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

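/*
 * This wakeup callback runs in the context of whoever signals the
 * eventfd, so it must not block.  The cached routing entry is read
 * under a seqcount retry loop: an MSI entry can be injected directly,
 * while anything else (including a cleared entry) is punted to the
 * injection work item.
 */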
/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (irq.type == KVM_IRQ_ROUTING_MSI)
			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
				    false);
		else
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will clean up for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

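/*
 * Refresh the irqfd's cached copy of the routing entry for its GSI.
 * Only MSI entries are cached (anything else leaves ->irq_entry.type
 * as 0), since MSI is the only delivery irqfd_wakeup() can perform
 * without deferring to the workqueue.  The seqcount write section
 * pairs with the retry loop in irqfd_wakeup().
 */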
/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int i, n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	irqfd->irq_entry.type = 0;

	e = entries;
	for (i = 0; i < n_entries; ++i, ++e) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			irqfd->irq_entry = *e;
	}

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}
#endif

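/*
 * Wire up a new irqfd: grab the eventfd behind args->fd, optionally
 * attach to (or create) the per-GSI resampler, hook our wakeup callback
 * into the eventfd's wait-queue, and finally poll once so that an event
 * signaled before registration is not lost.
 */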
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi)
				kian->irq_acked(kian);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_vcpu_request_scan_ioapic(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}
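
/*
 * Illustrative userspace usage (not part of this file): a VMM wires an
 * eventfd to GSI 5 with the KVM_IRQFD ioctl on the VM fd, after which a
 * write to the eventfd (or an in-kernel eventfd_signal(), e.g. from
 * vhost) injects the interrupt with no further VMM involvement.
 * "vm_fd" is assumed to be a VM file descriptor from KVM_CREATE_VM:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd irqfd = { .fd = efd, .gsi = 5 };
 *
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);                  // assign
 *	write(efd, &(uint64_t){ 1 }, sizeof(uint64_t));   // inject
 *
 *	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;            // tear down
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 */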

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances.  We need our own isolated
 * single-thread queue to prevent deadlock against flushing the normal
 * work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

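/*
 * Returning -EOPNOTSUPP tells the kvm_io_bus walk that this device did
 * not claim the access, so the search continues with the next device on
 * the bus; only a matching write consumes the access and signals the
 * eventfd.
 */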
/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

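/*
 * Two ioeventfds collide when they could both claim the same write:
 * same bus and address, and either one of them ignores length
 * (length == 0 matches any access width) or they share a length with
 * overlapping data semantics (a wildcard overlaps everything;
 * otherwise the datamatch values must differ).
 */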
/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

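/*
 * Register one ioeventfd on one specific bus: take a reference on the
 * eventfd, reject anything that would collide with an existing
 * registration, then expose the device on the bus under slots_lock.
 */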
static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				    enum kvm_bus bus_idx,
				    struct kvm_ioeventfd *args)
{
	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd *p, *tmp;
	struct eventfd_ctx *eventfd;
	int ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd ||
		    p->addr != args->addr ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

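/*
 * Illustrative userspace usage (not part of this file): registering a
 * 2-byte datamatch ioeventfd at MMIO address 0xfe001000, so that guest
 * writes of the value 1 to that address merely signal the eventfd
 * instead of exiting to the VMM.  "vm_fd" is assumed to be a VM file
 * descriptor and the address is an arbitrary example:
 *
 *	struct kvm_ioeventfd ioev = {
 *		.addr      = 0xfe001000,
 *		.len       = 2,
 *		.datamatch = 1,
 *		.fd        = eventfd(0, EFD_CLOEXEC),
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *	};
 *
 *	ioctl(vm_fd, KVM_IOEVENTFD, &ioev);
 */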
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}