// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

/* Per-CPU counter used by eventfd_signal() to detect wakeup recursion */
DEFINE_PER_CPU(int, eventfd_wake_count);

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
	int id;
};

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the
 * ULLONG_MAX value, and we signal this as an overflow condition by returning
 * EPOLLERR to poll(2).
 *
 * Returns the amount by which the counter was incremented. This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_count() before calling this function. If
	 * it returns true, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	this_cpu_inc(eventfd_wake_count);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	this_cpu_dec(eventfd_wake_count);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);
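
/*
 * A minimal usage sketch (illustrative, not part of this file): a
 * hypothetical driver holding an eventfd_ctx reference can notify
 * userspace from atomic context, e.g. an interrupt handler. "my_dev"
 * and its ->trigger field are assumptions for the example.
 *
 *	static irqreturn_t my_dev_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		if (dev->trigger)			// ctx acquired earlier
 *			eventfd_signal(dev->trigger, 1);	// count += 1, wake pollers
 *		return IRQ_HANDLED;
 *	}
 */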

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_simple_remove(&eventfd_ida, ctx->id);
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

/* Final fput(): signal EPOLLHUP to any pollers and drop the ctx reference */
static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock. This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it! add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read is ordered properly
	 * against the writes. The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}

void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	lockdep_assert_held(&ctx->wqh.lock);

	/* In EFD_SEMAPHORE mode each read consumes a single unit */
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the
 *                                 wait queue entry.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue entry to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
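
/*
 * A minimal teardown sketch (assumed, not from this file): a consumer that
 * attached its own wait_queue_entry_t to the eventfd's wait queue (e.g.
 * through the file's poll machinery) can detach it and drain the counter
 * atomically; "my_wait" is the entry it registered earlier.
 *
 *	__u64 cnt;
 *
 *	if (eventfd_ctx_remove_wait_queue(ctx, &my_wait, &cnt) == 0)
 *		pr_debug("drained %llu pending events\n",
 *			 (unsigned long long)cnt);
 *	else	// -EAGAIN: entry removed, but no events were pending
 *		pr_debug("no pending events\n");
 */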

/*
 * Returns the 8-byte counter value (or 1 in EFD_SEMAPHORE mode) and
 * resets/decrements the counter, blocking while it is zero unless
 * O_NONBLOCK or IOCB_NOWAIT is set.
 */
static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct eventfd_ctx *ctx = file->private_data;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (iov_iter_count(to) < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	if (!ctx->count) {
		if ((file->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -EAGAIN;
		}
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count)
				break;
			if (signal_pending(current)) {
				__remove_wait_queue(&ctx->wqh, &wait);
				__set_current_state(TASK_RUNNING);
				spin_unlock_irq(&ctx->wqh.lock);
				return -ERESTARTSYS;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	eventfd_ctx_do_read(ctx, &ucnt);
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irq(&ctx->wqh.lock);
	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
		return -EFAULT;

	return sizeof(ucnt);
}

/*
 * Adds the 8-byte value written to the counter, blocking while the addition
 * would make the counter reach ULLONG_MAX unless O_NONBLOCK is set. Writing
 * ULLONG_MAX itself is invalid.
 */
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-id: %d\n", ctx->id);
}
#endif
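
/*
 * Illustrative output of the two seq_printf() calls above, as it would
 * appear in /proc/<pid>/fdinfo/<fd> alongside the generic fdinfo fields
 * (values are instance-specific; here count is 0x1e and id is 3):
 *
 *	eventfd-count:               1e
 *	eventfd-id: 3
 */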

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read_iter	= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference to an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointers:
 *
 * -EBADF  : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise an error
 * pointer:
 *
 * -EBADF  : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
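
/*
 * A minimal lifecycle sketch (illustrative, not from this file): kernel
 * code that receives an eventfd descriptor from userspace, e.g. through
 * an ioctl, typically pins the context, signals it later, and drops the
 * reference on teardown. "efd" is a descriptor supplied by userspace.
 *
 *	struct eventfd_ctx *trigger;
 *
 *	trigger = eventfd_ctx_fdget(efd);
 *	if (IS_ERR(trigger))
 *		return PTR_ERR(trigger);
 *	...
 *	eventfd_signal(trigger, 1);	// notify userspace
 *	...
 *	eventfd_ctx_put(trigger);	// release on teardown
 */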

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	struct file *file;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	/* A negative id (IDA allocation failure) is tolerated here;
	 * eventfd_free_ctx() checks for it. */
	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

	flags &= EFD_SHARED_FCNTL_FLAGS;
	flags |= O_RDWR;
	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		goto err;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		fd = PTR_ERR(file);
		goto err;
	}

	file->f_mode |= FMODE_NOWAIT;
	fd_install(fd, file);
	return fd;
err:
	eventfd_free_ctx(ctx);
	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
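
/*
 * A minimal userspace sketch of the syscall semantics implemented above
 * (illustrative, using the glibc eventfd(2) wrapper; not part of this
 * file):
 *
 *	#include <sys/eventfd.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint64_t val = 3;
 *		int fd = eventfd(0, EFD_CLOEXEC);	// counter starts at 0
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, &val, sizeof(val));	// count += 3, wakes pollers
 *		read(fd, &val, sizeof(val));	// val == 3, count reset to 0
 *		close(fd);	// with EFD_SEMAPHORE, each read would
 *		return 0;	// instead return 1 and decrement count
 *	}
 */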