// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>

DEFINE_PER_CPU(int, eventfd_wake_count);

static DEFINE_IDA(eventfd_ida);
struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
	int id;
};
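
/*
 * An illustrative sketch (not part of this file's code) of the counter
 * semantics described above, as seen from userspace; "efd" is assumed to
 * be a descriptor returned by eventfd(2):
 *
 *	uint64_t v = 3;
 *
 *	write(efd, &v, sizeof(v));	// count += 3, readers see EPOLLIN
 *	write(efd, &v, sizeof(v));	// count == 6
 *	read(efd, &v, sizeof(v));	// v == 6, count reset to 0
 *					// (EFD_SEMAPHORE: v == 1, count == 5)
 */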

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented. This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_count() before calling this function. If
	 * it returns true, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	this_cpu_inc(eventfd_wake_count);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	this_cpu_dec(eventfd_wake_count);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);
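
/*
 * A minimal sketch of a kernel-side caller signaling an eventfd from a
 * non-sleeping path such as an interrupt handler. The "my_dev" structure
 * and its "trigger" field are assumed names for illustration; the context
 * would normally have been obtained via eventfd_ctx_fdget() on a
 * descriptor supplied by userspace:
 *
 *	static irqreturn_t my_dev_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		if (dev->trigger)
 *			eventfd_signal(dev->trigger, 1);
 *		return IRQ_HANDLED;
 *	}
 */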

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_simple_remove(&eventfd_ida, ctx->id);
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock. This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it! add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read be ordered properly
	 * against the writes. The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}
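
/*
 * The event bits computed above are what userspace observes through
 * poll(2)/epoll(7). A small illustrative sketch of a consumer (not part
 * of this file's code; error handling omitted):
 *
 *	struct pollfd pfd = { .fd = efd, .events = POLLIN };
 *	uint64_t v;
 *
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
 *		read(efd, &v, sizeof(v));	// drains the count; the next
 *						// poll() blocks until a write
 */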

static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
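
/*
 * A sketch of the teardown pattern this helper serves: a consumer that
 * registered its own wait queue entry on the eventfd (the in-kernel irqfd
 * code is one such user) detaches that entry and collects any pending
 * count in a single atomic step. The "irqfd" names are assumptions for
 * illustration:
 *
 *	__u64 cnt;
 *
 *	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
 *	// The entry is now off the queue, so no further wakeups can reach
 *	// this consumer; cnt holds whatever count was still pending.
 */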

static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;

	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ctx->count > 0)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		eventfd_ctx_do_read(ctx, &ucnt);
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
		return -EFAULT;

	return res;
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-id: %d\n", ctx->id);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read		= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquires a reference to an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointers:
 *
 * -EBADF	: Invalid @fd file descriptor.
 * -EINVAL	: The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
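
/*
 * A typical usage sketch for eventfd_ctx_fdget(): an ioctl handler that
 * is handed an eventfd descriptor by userspace and caches a long-lived
 * reference to the context. "my_ctx", "user_fd" and the surrounding
 * plumbing are assumed names for illustration:
 *
 *	struct eventfd_ctx *trigger;
 *
 *	trigger = eventfd_ctx_fdget(user_fd);
 *	if (IS_ERR(trigger))
 *		return PTR_ERR(trigger);
 *	my_ctx->trigger = trigger;	// released later via eventfd_ctx_put()
 */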
383
384/**
385 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
386 * @file: [in] Eventfd file pointer.
387 *
388 * Returns a pointer to the internal eventfd context, otherwise the error
389 * pointer:
390 *
391 * -EINVAL : The @fd file descriptor is not an eventfd file.
392 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

	fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
			      O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
	if (fd < 0)
		eventfd_free_ctx(ctx);

	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}

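/*
 * An end-to-end usage sketch from userspace, tying the syscalls above to
 * the read/write semantics implemented in this file (illustrative only,
 * error handling omitted):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 *	uint64_t v = 1;
 *
 *	write(efd, &v, sizeof(v));	// count: 0 -> 1
 *	read(efd, &v, sizeof(v));	// v == 1, count back to 0
 *	read(efd, &v, sizeof(v));	// fails with errno == EAGAIN
 *	close(efd);
 */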