/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <trace/events/kvm.h>

#include "irq.h"
#include "iodev.h"

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */

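/*
 * Illustrative userspace sketch (an assumption about a typical caller,
 * not part of this file): wire an eventfd to a guest interrupt line
 * with the KVM_IRQFD ioctl, after which every signal on the eventfd
 * injects one interrupt:
 *
 *	struct kvm_irqfd irqfd = { 0 };
 *
 *	irqfd.fd  = eventfd(0, 0);
 *	irqfd.gsi = gsi;		// guest interrupt line to drive
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *
 *	// writing a count to irqfd.fd now injects an interrupt
 *	write(irqfd.fd, &(uint64_t){ 1 }, sizeof(uint64_t));
 */
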
/*
 * Resampling irqfds are a special variety of irqfds used to emulate
 * level triggered interrupts.  The interrupt is asserted on eventfd
 * trigger.  On acknowledgement through the irq ack notifier, the
 * interrupt is de-asserted and userspace is notified through the
 * resamplefd.  All resamplers on the same gsi are de-asserted
 * together, so we don't need to track the state of each individual
 * user.  We can also therefore share the same irq source ID.
 */
struct _irqfd_resampler {
	struct kvm *kvm;
	/*
	 * List of resampling struct _irqfd objects sharing this gsi.
	 * RCU list modified under kvm->irqfds.resampler_lock
	 */
	struct list_head list;
	struct kvm_irq_ack_notifier notifier;
	/*
	 * Entry in list of kvm->irqfd.resampler_list.  Use for sharing
	 * resamplers among irqfds on the same gsi.
	 * Accessed and modified under kvm->irqfds.resampler_lock
	 */
	struct list_head link;
};

struct _irqfd {
	/* Used for MSI fast-path */
	struct kvm *kvm;
	wait_queue_t wait;
	/* Update side is protected by irqfds.lock */
	struct kvm_kernel_irq_routing_entry irq_entry;
	seqcount_t irq_entry_sc;
	/* Used for level IRQ fast-path */
	int gsi;
	struct work_struct inject;
	/* The resampler used by this irqfd (resampler-only) */
	struct _irqfd_resampler *resampler;
	/* Eventfd notified on resample (resampler-only) */
	struct eventfd_ctx *resamplefd;
	/* Entry in list of irqfds for a resampler (resampler-only) */
	struct list_head resampler_link;
	/* Used for setup/shutdown */
	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};

static struct workqueue_struct *irqfd_cleanup_wq;

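/*
 * Workqueue handler: inject the interrupt for a signaled irqfd.  An
 * ordinary irqfd pulses the line (assert then de-assert); a resampling
 * irqfd only asserts, leaving de-assertion to the irq ack notifier.
 */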
static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct _irqfd_resampler *resampler;
	struct kvm *kvm;
	struct _irqfd *irqfd;
	int idx;

	resampler = container_of(kian, struct _irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

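/*
 * Detach an irqfd from its resampler.  The last irqfd on a gsi also
 * tears the resampler down: the ack notifier is unregistered and the
 * level line is left de-asserted.
 */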
static void
irqfd_resampler_shutdown(struct _irqfd *irqfd)
{
	struct _irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed.
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (irq.type == KVM_IRQ_ROUTING_MSI)
			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					false);
		else
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

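/*
 * Poll-table callback: hook irqfd->wait (armed with irqfd_wakeup)
 * into the eventfd's wait-queue so we hear about every signal.
 */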
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int i, n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	irqfd->irq_entry.type = 0;

	e = entries;
	for (i = 0; i < n_entries; ++i, ++e) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			irqfd->irq_entry = *e;
	}

	write_seqcount_end(&irqfd->irq_entry_sc);
}

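/*
 * Register a new irqfd: resolve the eventfd, optionally attach (or
 * create) a resampler for KVM_IRQFD_FLAG_RESAMPLE, reject duplicate
 * eventfds, snapshot the current routing, and finally poll once so an
 * event that was pending before registration is not lost.
 */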
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct _irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi.
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct _irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct _irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances.  We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

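/*
 * Illustrative userspace sketch (an assumption about a typical caller,
 * not part of this file): turn a 4-byte doorbell write at guest
 * physical address `addr' into an eventfd signal instead of an exit
 * to userspace:
 *
 *	struct kvm_ioeventfd ioeventfd = { 0 };
 *
 *	ioeventfd.addr = addr;
 *	ioeventfd.len  = 4;
 *	ioeventfd.fd   = eventfd(0, 0);
 *	ioctl(vm_fd, KVM_IOEVENTFD, &ioeventfd);
 */
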
struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

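/* Drop the eventfd reference, unlink from kvm->ioeventfds and free. */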
static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

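/*
 * Decide whether a write hits this ioeventfd: the address must match
 * exactly, a zero-length registration matches any access width, and a
 * wildcard registration skips the data comparison entirely.
 */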
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
		const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

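/* Map the userspace flags to the bus on which the device should live. */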
static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

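/*
 * Validate the arguments, reject colliding registrations, and hook the
 * new ioeventfd up to the chosen bus (plus KVM_FAST_MMIO_BUS for
 * zero-length MMIO registrations).
 */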
static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	struct _ioeventfd *p;
	struct eventfd_ctx *eventfd;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len &&
	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
			   KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
		ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
					      p->addr, 0, &p->dev);
		if (ret < 0)
			goto register_fail;
	}

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

register_fail:
	kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

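/*
 * Find the registration matching bus, eventfd, address, length and
 * datamatch; unregister it from the bus(es) and release it.
 */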
static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	struct _ioeventfd *p, *tmp;
	struct eventfd_ctx *eventfd;
	int ret = -ENOENT;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		if (!p->length) {
			kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
						  &p->dev);
		}
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}

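/*
 * Return true if someone (e.g. a resampling irqfd) has registered an
 * irq ack notifier for the gsi that this irqchip pin maps to.
 */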
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

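/*
 * Run every ack notifier registered for the gsi behind this irqchip
 * pin; called when the guest acknowledges an interrupt.
 */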
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi)
				kian->irq_acked(kian);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
#ifdef __KVM_HAVE_IOAPIC
	kvm_vcpu_request_scan_ioapic(kvm);
#endif
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
#ifdef __KVM_HAVE_IOAPIC
	kvm_vcpu_request_scan_ioapic(kvm);
#endif
}
930}