// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
        struct kref kref;
        wait_queue_head_t wqh;
        /*
         * Every time that a write(2) is performed on an eventfd, the
         * value of the __u64 being written is added to "count" and a
         * wakeup is performed on "wqh". A read(2) will return the "count"
         * value to userspace, and will reset "count" to zero. The kernel
         * side eventfd_signal() also adds to the "count" counter and
         * issues a wakeup.
         */
        __u64 count;
        unsigned int flags;
        int id;
};
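
/*
 * Illustrative note (not part of the kernel sources): the "count" semantics
 * described above mean that successive writes accumulate and a single read
 * drains the counter. A hypothetical userspace sketch:
 *
 *      int efd = eventfd(0, 0);
 *      uint64_t v = 3;
 *      write(efd, &v, sizeof(v));      // count = 3
 *      v = 4;
 *      write(efd, &v, sizeof(v));      // count = 7
 *      read(efd, &v, sizeof(v));       // v = 7, count reset to 0
 */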

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented. This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
        unsigned long flags;

        /*
         * Deadlock or stack overflow issues can happen if we recurse here
         * through waitqueue wakeup handlers. If the caller uses potentially
         * nested waitqueues with custom wakeup handlers, then it should
         * check eventfd_signal_allowed() before calling this function. If
         * it returns false, the eventfd_signal() call should be deferred to a
         * safe context.
         */
        if (WARN_ON_ONCE(current->in_eventfd_signal))
                return 0;

        spin_lock_irqsave(&ctx->wqh.lock, flags);
        current->in_eventfd_signal = 1;
        if (ULLONG_MAX - ctx->count < n)
                n = ULLONG_MAX - ctx->count;
        ctx->count += n;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLIN);
        current->in_eventfd_signal = 0;
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);

        return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);
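
/*
 * Illustrative sketch (not part of this file): a driver holding an
 * eventfd_ctx reference, e.g. obtained earlier via eventfd_ctx_fdget(),
 * can notify userspace from a non-sleeping path such as an interrupt
 * handler. "my_dev" and its "trigger" member are hypothetical names:
 *
 *      static irqreturn_t my_dev_irq(int irq, void *data)
 *      {
 *              struct my_dev *dev = data;
 *
 *              eventfd_signal(dev->trigger, 1);  // add 1, wake any pollers
 *              return IRQ_HANDLED;
 *      }
 */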

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
        if (ctx->id >= 0)
                ida_simple_remove(&eventfd_ida, ctx->id);
        kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
        struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

        eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
        kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
        struct eventfd_ctx *ctx = file->private_data;

        wake_up_poll(&ctx->wqh, EPOLLHUP);
        eventfd_ctx_put(ctx);
        return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
        struct eventfd_ctx *ctx = file->private_data;
        __poll_t events = 0;
        u64 count;

        poll_wait(file, &ctx->wqh, wait);

        /*
         * All writes to ctx->count occur within ctx->wqh.lock. This read
         * can be done outside ctx->wqh.lock because we know that poll_wait
         * takes that lock (through add_wait_queue) if our caller will sleep.
         *
         * The read _can_ therefore seep into add_wait_queue's critical
         * section, but cannot move above it! add_wait_queue's spin_lock acts
         * as an acquire barrier and ensures that the read is ordered properly
         * against the writes. The following CAN happen and is safe:
         *
         *     poll                               write
         *     -----------------                  ------------
         *     lock ctx->wqh.lock (in poll_wait)
         *     count = ctx->count
         *     __add_wait_queue
         *     unlock ctx->wqh.lock
         *                                        lock ctx->wqh.lock
         *                                        ctx->count += n
         *                                        if (waitqueue_active)
         *                                          wake_up_locked_poll
         *                                        unlock ctx->wqh.lock
         *     eventfd_poll returns 0
         *
         * but the following, which would miss a wakeup, cannot happen:
         *
         *     poll                               write
         *     -----------------                  ------------
         *     count = ctx->count (INVALID!)
         *                                        lock ctx->wqh.lock
         *                                        ctx->count += n
         *                                        **waitqueue_active is false**
         *                                        **no wake_up_locked_poll!**
         *                                        unlock ctx->wqh.lock
         *     lock ctx->wqh.lock (in poll_wait)
         *     __add_wait_queue
         *     unlock ctx->wqh.lock
         *     eventfd_poll returns 0
         */
        count = READ_ONCE(ctx->count);

        if (count > 0)
                events |= EPOLLIN;
        if (count == ULLONG_MAX)
                events |= EPOLLERR;
        if (ULLONG_MAX - 1 > count)
                events |= EPOLLOUT;

        return events;
}

void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
        lockdep_assert_held(&ctx->wqh.lock);

        *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
        ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);
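
/*
 * Illustrative note (not part of this file): with EFD_SEMAPHORE set, the
 * ternary above drains the counter one unit at a time, so the eventfd
 * behaves like a semaphore. A hypothetical userspace sketch:
 *
 *      int efd = eventfd(3, EFD_SEMAPHORE);
 *      uint64_t v;
 *      read(efd, &v, sizeof(v));       // v = 1, count drops 3 -> 2
 *      read(efd, &v, sizeof(v));       // v = 1, count drops 2 -> 1
 */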

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
                                  __u64 *cnt)
{
        unsigned long flags;

        spin_lock_irqsave(&ctx->wqh.lock, flags);
        eventfd_ctx_do_read(ctx, cnt);
        __remove_wait_queue(&ctx->wqh, wait);
        if (*cnt != 0 && waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);

        return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
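
/*
 * Illustrative sketch (not part of this file): a subsystem that parked a
 * custom wait queue entry on the eventfd (as KVM's irqfd does) can detach
 * it and collect any pending count in one atomic step. "my_irqfd" and its
 * members are hypothetical names:
 *
 *      static void my_irqfd_shutdown(struct my_irqfd *irqfd)
 *      {
 *              u64 cnt;
 *
 *              // Detach our wait queue entry and drain the counter
 *              eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait,
 *                                            &cnt);
 *      }
 */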

static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct eventfd_ctx *ctx = file->private_data;
        __u64 ucnt = 0;
        DECLARE_WAITQUEUE(wait, current);

        if (iov_iter_count(to) < sizeof(ucnt))
                return -EINVAL;
        spin_lock_irq(&ctx->wqh.lock);
        if (!ctx->count) {
                if ((file->f_flags & O_NONBLOCK) ||
                    (iocb->ki_flags & IOCB_NOWAIT)) {
                        spin_unlock_irq(&ctx->wqh.lock);
                        return -EAGAIN;
                }
                __add_wait_queue(&ctx->wqh, &wait);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (ctx->count)
                                break;
                        if (signal_pending(current)) {
                                __remove_wait_queue(&ctx->wqh, &wait);
                                __set_current_state(TASK_RUNNING);
                                spin_unlock_irq(&ctx->wqh.lock);
                                return -ERESTARTSYS;
                        }
                        spin_unlock_irq(&ctx->wqh.lock);
                        schedule();
                        spin_lock_irq(&ctx->wqh.lock);
                }
                __remove_wait_queue(&ctx->wqh, &wait);
                __set_current_state(TASK_RUNNING);
        }
        eventfd_ctx_do_read(ctx, &ucnt);
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
        spin_unlock_irq(&ctx->wqh.lock);
        if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
                return -EFAULT;

        return sizeof(ucnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
                             loff_t *ppos)
{
        struct eventfd_ctx *ctx = file->private_data;
        ssize_t res;
        __u64 ucnt;
        DECLARE_WAITQUEUE(wait, current);

        if (count < sizeof(ucnt))
                return -EINVAL;
        if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
                return -EFAULT;
        if (ucnt == ULLONG_MAX)
                return -EINVAL;
        spin_lock_irq(&ctx->wqh.lock);
        res = -EAGAIN;
        if (ULLONG_MAX - ctx->count > ucnt)
                res = sizeof(ucnt);
        else if (!(file->f_flags & O_NONBLOCK)) {
                __add_wait_queue(&ctx->wqh, &wait);
                for (res = 0;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (ULLONG_MAX - ctx->count > ucnt) {
                                res = sizeof(ucnt);
                                break;
                        }
                        if (signal_pending(current)) {
                                res = -ERESTARTSYS;
                                break;
                        }
                        spin_unlock_irq(&ctx->wqh.lock);
                        schedule();
                        spin_lock_irq(&ctx->wqh.lock);
                }
                __remove_wait_queue(&ctx->wqh, &wait);
                __set_current_state(TASK_RUNNING);
        }
        if (likely(res > 0)) {
                ctx->count += ucnt;
                if (waitqueue_active(&ctx->wqh))
                        wake_up_locked_poll(&ctx->wqh, EPOLLIN);
        }
        spin_unlock_irq(&ctx->wqh.lock);

        return res;
}
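
/*
 * Illustrative note (not part of this file): per the overflow check above,
 * the largest value a write(2) can deposit is ULLONG_MAX - 1; a write that
 * would push "count" past that blocks, or fails with EAGAIN on a
 * non-blocking fd. A hypothetical userspace sketch:
 *
 *      int efd = eventfd(0, EFD_NONBLOCK);
 *      uint64_t v = UINT64_MAX - 1;    // largest accepted value
 *      write(efd, &v, sizeof(v));      // count = UINT64_MAX - 1
 *      write(efd, &v, sizeof(v));      // would overflow: fails with EAGAIN
 */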

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
        struct eventfd_ctx *ctx = f->private_data;

        spin_lock_irq(&ctx->wqh.lock);
        seq_printf(m, "eventfd-count: %16llx\n",
                   (unsigned long long)ctx->count);
        spin_unlock_irq(&ctx->wqh.lock);
        seq_printf(m, "eventfd-id: %d\n", ctx->id);
}
#endif
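
/*
 * Illustrative note (not part of this file): the handler above surfaces an
 * eventfd's state in /proc/<pid>/fdinfo/<fd>. Alongside the generic fdinfo
 * fields, the two seq_printf() calls produce lines roughly like the
 * following (the count is hex-formatted; values are examples only):
 *
 *      eventfd-count:                7
 *      eventfd-id: 4
 */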

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = eventfd_show_fdinfo,
#endif
        .release        = eventfd_release,
        .poll           = eventfd_poll,
        .read_iter      = eventfd_read,
        .write          = eventfd_write,
        .llseek         = noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference to an eventfd file.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF  : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
        struct file *file;

        file = fget(fd);
        if (!file)
                return ERR_PTR(-EBADF);
        if (file->f_op != &eventfd_fops) {
                fput(file);
                return ERR_PTR(-EINVAL);
        }

        return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
        struct eventfd_ctx *ctx;
        struct fd f = fdget(fd);

        if (!f.file)
                return ERR_PTR(-EBADF);
        ctx = eventfd_ctx_fileget(f.file);
        fdput(f);
        return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL : The @file is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
        struct eventfd_ctx *ctx;

        if (file->f_op != &eventfd_fops)
                return ERR_PTR(-EINVAL);

        ctx = file->private_data;
        kref_get(&ctx->kref);
        return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
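
/*
 * Illustrative sketch (not part of this file): kernel users typically pin
 * the context once at setup time and drop it at teardown, so the ctx can
 * outlive the file descriptor userspace handed in. "my_dev" and its
 * "trigger" member are hypothetical names:
 *
 *      static int my_dev_set_eventfd(struct my_dev *dev, int fd)
 *      {
 *              struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);
 *
 *              if (IS_ERR(ctx))
 *                      return PTR_ERR(ctx);
 *              dev->trigger = ctx;     // hold the kref until teardown
 *              return 0;
 *      }
 *
 *      static void my_dev_teardown(struct my_dev *dev)
 *      {
 *              eventfd_ctx_put(dev->trigger);  // drop our reference
 *      }
 */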

static int do_eventfd(unsigned int count, int flags)
{
        struct eventfd_ctx *ctx;
        struct file *file;
        int fd;

        /* Check the EFD_* constants for consistency.  */
        BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
        BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

        if (flags & ~EFD_FLAGS_SET)
                return -EINVAL;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        kref_init(&ctx->kref);
        init_waitqueue_head(&ctx->wqh);
        ctx->count = count;
        ctx->flags = flags;
        ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

        flags &= EFD_SHARED_FCNTL_FLAGS;
        flags |= O_RDWR;
        fd = get_unused_fd_flags(flags);
        if (fd < 0)
                goto err;

        file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
        if (IS_ERR(file)) {
                put_unused_fd(fd);
                fd = PTR_ERR(file);
                goto err;
        }

        file->f_mode |= FMODE_NOWAIT;
        fd_install(fd, file);
        return fd;
err:
        eventfd_free_ctx(ctx);
        return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
        return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
        return do_eventfd(count, 0);
}
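
/*
 * Illustrative note (not part of this file): both entry points funnel into
 * do_eventfd(); eventfd(2) is the legacy flag-less variant. A hypothetical
 * userspace sketch using the glibc wrapper:
 *
 *      #include <sys/eventfd.h>
 *
 *      int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 *      if (efd < 0)
 *              perror("eventfd");
 */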