/*
 * VFIO generic eventfd code for IRQFD support.
 * Derived from drivers/vfio/pci/vfio_pci_intrs.c
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/vfio.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/slab.h>

static struct workqueue_struct *vfio_irqfd_cleanup_wq;
static DEFINE_SPINLOCK(virqfd_lock);

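/*
 * Create/destroy the single-threaded workqueue on which all virqfd
 * release work runs; the VFIO core is expected to call these once at
 * module init/exit.
 */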
int __init vfio_virqfd_init(void)
{
	vfio_irqfd_cleanup_wq =
		create_singlethread_workqueue("vfio-irqfd-cleanup");
	if (!vfio_irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void vfio_virqfd_exit(void)
{
	destroy_workqueue(vfio_irqfd_cleanup_wq);
}

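/* Queue a virqfd for release; the caller must hold virqfd_lock. */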
static void virqfd_deactivate(struct virqfd *virqfd)
{
	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}

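/*
 * Wait-queue callback, invoked under the eventfd's wait-queue lock
 * whenever the eventfd is signaled (POLLIN) or closed (POLLHUP).  It
 * must not sleep, so all real work is deferred to work items.
 */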
static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
	unsigned long flags = (unsigned long)key;

	if (flags & POLLIN) {
		/* An event has been signaled; call the handler. */
		if ((!virqfd->handler ||
		     virqfd->handler(virqfd->opaque, virqfd->data)) &&
		    virqfd->thread)
			schedule_work(&virqfd->inject);
	}

	if (flags & POLLHUP) {
		unsigned long flags;
		spin_lock_irqsave(&virqfd_lock, flags);

		/*
		 * The eventfd is closing.  If the virqfd has not yet been
		 * queued for release, as determined by testing whether the
		 * caller's pointer to it is still valid, queue it now.  As
		 * with kvm irqfds, we know we won't race against the virqfd
		 * going away because we hold the lock to get here.
		 */
		if (*(virqfd->pvirqfd) == virqfd) {
			*(virqfd->pvirqfd) = NULL;
			virqfd_deactivate(virqfd);
		}

		spin_unlock_irqrestore(&virqfd_lock, flags);
	}

	return 0;
}

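/*
 * poll_table callback: add our wait entry to the eventfd's wait queue
 * so virqfd_wakeup() fires on every signal.
 */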
static void virqfd_ptable_queue_proc(struct file *file,
				     wait_queue_head_t *wqh, poll_table *pt)
{
	struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
	add_wait_queue(wqh, &virqfd->wait);
}

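/*
 * Runs on vfio_irqfd_cleanup_wq: detach from the eventfd's wait queue,
 * flush any pending inject work, then drop the eventfd reference and
 * free the virqfd.
 */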
static void virqfd_shutdown(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
	u64 cnt;

	eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
	flush_work(&virqfd->inject);
	eventfd_ctx_put(virqfd->eventfd);

	kfree(virqfd);
}

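/* Work-item bottom half: runs the consumer's sleepable callback. */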
static void virqfd_inject(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, inject);
	if (virqfd->thread)
		virqfd->thread(virqfd->opaque, virqfd->data);
}

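/**
 * vfio_virqfd_enable - hook an eventfd up to a handler/thread pair
 * @opaque: context pointer passed through to both callbacks
 * @handler: optional callback run from atomic wakeup context; returning
 *           non-zero requests that @thread be scheduled as well
 * @thread: optional callback run from process context via a work item
 * @data: second argument passed through to both callbacks
 * @pvirqfd: caller-owned slot for the new virqfd; must be NULL on entry
 *           (else -EBUSY) and is cleared if the eventfd is closed
 * @fd: the eventfd to watch
 */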
int vfio_virqfd_enable(void *opaque,
		       int (*handler)(void *, void *),
		       void (*thread)(void *, void *),
		       void *data, struct virqfd **pvirqfd, int fd)
{
	struct fd irqfd;
	struct eventfd_ctx *ctx;
	struct virqfd *virqfd;
	int ret = 0;
	unsigned int events;

	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
	if (!virqfd)
		return -ENOMEM;

	virqfd->pvirqfd = pvirqfd;
	virqfd->opaque = opaque;
	virqfd->handler = handler;
	virqfd->thread = thread;
	virqfd->data = data;

	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);

	irqfd = fdget(fd);
	if (!irqfd.file) {
		ret = -EBADF;
		goto err_fd;
	}

	ctx = eventfd_ctx_fileget(irqfd.file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto err_ctx;
	}

	virqfd->eventfd = ctx;

	/*
	 * virqfds can be released by closing the eventfd or directly
	 * through ioctl.  These are both done through a workqueue, so
	 * we update the pointer to the virqfd under lock to avoid
	 * pushing multiple jobs to release the same virqfd.
	 */
	spin_lock_irq(&virqfd_lock);

	if (*pvirqfd) {
		spin_unlock_irq(&virqfd_lock);
		ret = -EBUSY;
		goto err_busy;
	}
	*pvirqfd = virqfd;

	spin_unlock_irq(&virqfd_lock);

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

	events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	if (events & POLLIN) {
		if ((!handler || handler(opaque, data)) && thread)
			schedule_work(&virqfd->inject);
	}

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the POLLHUP.
	 */
	fdput(irqfd);

	return 0;
err_busy:
	eventfd_ctx_put(ctx);
err_ctx:
	fdput(irqfd);
err_fd:
	kfree(virqfd);

	return ret;
}
EXPORT_SYMBOL_GPL(vfio_virqfd_enable);

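/*
 * Tear down a virqfd: clear the caller's pointer under the lock so the
 * release job is queued at most once, then flush the cleanup workqueue
 * so the virqfd is fully freed before returning.
 */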
void vfio_virqfd_disable(struct virqfd **pvirqfd)
{
	unsigned long flags;

	spin_lock_irqsave(&virqfd_lock, flags);

	if (*pvirqfd) {
		virqfd_deactivate(*pvirqfd);
		*pvirqfd = NULL;
	}

	spin_unlock_irqrestore(&virqfd_lock, flags);

	/*
	 * Block until we know all outstanding shutdown jobs have completed.
	 * Even if we don't queue the job, flush the wq to be sure it's
	 * been released.
	 */
	flush_workqueue(vfio_irqfd_cleanup_wq);
}
EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
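
/*
 * A minimal usage sketch for the API above.  The consumer below is
 * hypothetical: my_dev, my_dev_needs_work, my_dev_do_work and efd are
 * illustrative names, not part of this file.  A driver typically wires
 * an eventfd received from userspace (e.g. via a SET_IRQS-style ioctl)
 * to a fast atomic check (handler) and an optional sleepable bottom
 * half (thread):
 *
 *	static int my_handler(void *opaque, void *data)
 *	{
 *		struct my_dev *dev = opaque;
 *
 *		// Runs from the wakeup path; must not sleep.  Returning
 *		// non-zero asks for the thread callback to be scheduled.
 *		return my_dev_needs_work(dev);
 *	}
 *
 *	static void my_thread(void *opaque, void *data)
 *	{
 *		// Runs from a work item and may sleep.
 *		my_dev_do_work(opaque);
 *	}
 *
 *	// Enable: dev->virqfd must start out NULL, else -EBUSY.
 *	ret = vfio_virqfd_enable(dev, my_handler, my_thread, NULL,
 *				 &dev->virqfd, efd);
 *
 *	// Disable: safe even if the eventfd was already closed.
 *	vfio_virqfd_disable(&dev->virqfd);
 */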