// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
        struct kref kref;
        wait_queue_head_t wqh;
        /*
         * Every time that a write(2) is performed on an eventfd, the
         * value of the __u64 being written is added to "count" and a
         * wakeup is performed on "wqh". A read(2) will return the "count"
         * value to userspace, and will reset "count" to zero. The kernel
         * side eventfd_signal() also adds to the "count" counter and
         * issues a wakeup.
         */
        __u64 count;
        unsigned int flags;
        int id;
};

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented. This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
        unsigned long flags;

        /*
         * Deadlock or stack overflow issues can happen if we recurse here
         * through waitqueue wakeup handlers. If the caller uses potentially
         * nested waitqueues with custom wakeup handlers, then it should
         * check eventfd_signal_allowed() before calling this function. If
         * it returns false, the eventfd_signal() call should be deferred to a
         * safe context.
         */
        if (WARN_ON_ONCE(current->in_eventfd_signal))
                return 0;

        spin_lock_irqsave(&ctx->wqh.lock, flags);
        current->in_eventfd_signal = 1;
        if (ULLONG_MAX - ctx->count < n)
                n = ULLONG_MAX - ctx->count;
        ctx->count += n;
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLIN);
        current->in_eventfd_signal = 0;
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);

        return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);

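/*
 * Illustrative sketch only (not part of this file): a typical in-kernel
 * consumer holds an eventfd_ctx reference (see eventfd_ctx_fdget() below)
 * and signals it from a non-sleeping path such as an interrupt handler.
 * The struct my_dev and its trigger field are hypothetical names.
 *
 *	static irqreturn_t my_dev_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		if (dev->trigger)
 *			eventfd_signal(dev->trigger, 1);
 *		return IRQ_HANDLED;
 *	}
 */
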
static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
        if (ctx->id >= 0)
                ida_simple_remove(&eventfd_ida, ctx->id);
        kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
        struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

        eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
        kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
        struct eventfd_ctx *ctx = file->private_data;

        wake_up_poll(&ctx->wqh, EPOLLHUP);
        eventfd_ctx_put(ctx);
        return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
        struct eventfd_ctx *ctx = file->private_data;
        __poll_t events = 0;
        u64 count;

        poll_wait(file, &ctx->wqh, wait);

        /*
         * All writes to ctx->count occur within ctx->wqh.lock. This read
         * can be done outside ctx->wqh.lock because we know that poll_wait
         * takes that lock (through add_wait_queue) if our caller will sleep.
         *
         * The read _can_ therefore seep into add_wait_queue's critical
         * section, but cannot move above it! add_wait_queue's spin_lock acts
         * as an acquire barrier and ensures that the read be ordered properly
         * against the writes. The following CAN happen and is safe:
         *
         *     poll                               write
         *     -----------------                  ------------
         *     lock ctx->wqh.lock (in poll_wait)
         *     count = ctx->count
         *     __add_wait_queue
         *     unlock ctx->wqh.lock
         *                                        lock ctx->wqh.lock
         *                                        ctx->count += n
         *                                        if (waitqueue_active)
         *                                          wake_up_locked_poll
         *                                        unlock ctx->wqh.lock
         *     eventfd_poll returns 0
         *
         * but the following, which would miss a wakeup, cannot happen:
         *
         *     poll                               write
         *     -----------------                  ------------
         *     count = ctx->count (INVALID!)
         *                                        lock ctx->wqh.lock
         *                                        ctx->count += n
         *                                        **waitqueue_active is false**
         *                                        **no wake_up_locked_poll!**
         *                                        unlock ctx->wqh.lock
         *     lock ctx->wqh.lock (in poll_wait)
         *     __add_wait_queue
         *     unlock ctx->wqh.lock
         *     eventfd_poll returns 0
         */
        count = READ_ONCE(ctx->count);

        if (count > 0)
                events |= EPOLLIN;
        if (count == ULLONG_MAX)
                events |= EPOLLERR;
        if (ULLONG_MAX - 1 > count)
                events |= EPOLLOUT;

        return events;
}

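/*
 * Illustrative sketch only (userspace, not part of this file): the events
 * computed above map directly onto a poll(2) loop. EPOLLIN means a read
 * would return without blocking, EPOLLOUT means a write of at least 1
 * would not block, and EPOLLERR reports the ULLONG_MAX overflow state.
 *
 *	struct pollfd pfd = { .fd = efd, .events = POLLIN };
 *	uint64_t v;
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(efd, &v, sizeof(v));	// returns and resets the count
 */
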
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
        lockdep_assert_held(&ctx->wqh.lock);

        *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
        ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
                                  __u64 *cnt)
{
        unsigned long flags;

        spin_lock_irqsave(&ctx->wqh.lock, flags);
        eventfd_ctx_do_read(ctx, cnt);
        __remove_wait_queue(&ctx->wqh, wait);
        if (*cnt != 0 && waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);

        return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);

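/*
 * Illustrative sketch only (not part of this file): consumers that hooked
 * their own wait queue entry into the eventfd (the KVM irqfd pattern) tear
 * it down with eventfd_ctx_remove_wait_queue(), which also drains the
 * counter so no pending event is lost. struct my_irqfd and its fields are
 * hypothetical stand-ins for such a consumer.
 *
 *	static void my_irqfd_shutdown(struct my_irqfd *irqfd)
 *	{
 *		__u64 cnt;
 *
 *		eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
 *	}
 */
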
static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct eventfd_ctx *ctx = file->private_data;
        __u64 ucnt = 0;
        DECLARE_WAITQUEUE(wait, current);

        if (iov_iter_count(to) < sizeof(ucnt))
                return -EINVAL;
        spin_lock_irq(&ctx->wqh.lock);
        if (!ctx->count) {
                if ((file->f_flags & O_NONBLOCK) ||
                    (iocb->ki_flags & IOCB_NOWAIT)) {
                        spin_unlock_irq(&ctx->wqh.lock);
                        return -EAGAIN;
                }
                __add_wait_queue(&ctx->wqh, &wait);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (ctx->count)
                                break;
                        if (signal_pending(current)) {
                                __remove_wait_queue(&ctx->wqh, &wait);
                                __set_current_state(TASK_RUNNING);
                                spin_unlock_irq(&ctx->wqh.lock);
                                return -ERESTARTSYS;
                        }
                        spin_unlock_irq(&ctx->wqh.lock);
                        schedule();
                        spin_lock_irq(&ctx->wqh.lock);
                }
                __remove_wait_queue(&ctx->wqh, &wait);
                __set_current_state(TASK_RUNNING);
        }
        eventfd_ctx_do_read(ctx, &ucnt);
        if (waitqueue_active(&ctx->wqh))
                wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
        spin_unlock_irq(&ctx->wqh.lock);
        if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
                return -EFAULT;

        return sizeof(ucnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
                             loff_t *ppos)
{
        struct eventfd_ctx *ctx = file->private_data;
        ssize_t res;
        __u64 ucnt;
        DECLARE_WAITQUEUE(wait, current);

        if (count < sizeof(ucnt))
                return -EINVAL;
        if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
                return -EFAULT;
        if (ucnt == ULLONG_MAX)
                return -EINVAL;
        spin_lock_irq(&ctx->wqh.lock);
        res = -EAGAIN;
        if (ULLONG_MAX - ctx->count > ucnt)
                res = sizeof(ucnt);
        else if (!(file->f_flags & O_NONBLOCK)) {
                __add_wait_queue(&ctx->wqh, &wait);
                for (res = 0;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (ULLONG_MAX - ctx->count > ucnt) {
                                res = sizeof(ucnt);
                                break;
                        }
                        if (signal_pending(current)) {
                                res = -ERESTARTSYS;
                                break;
                        }
                        spin_unlock_irq(&ctx->wqh.lock);
                        schedule();
                        spin_lock_irq(&ctx->wqh.lock);
                }
                __remove_wait_queue(&ctx->wqh, &wait);
                __set_current_state(TASK_RUNNING);
        }
        if (likely(res > 0)) {
                ctx->count += ucnt;
                if (waitqueue_active(&ctx->wqh))
                        wake_up_locked_poll(&ctx->wqh, EPOLLIN);
        }
        spin_unlock_irq(&ctx->wqh.lock);

        return res;
}

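/*
 * Illustrative sketch only (userspace, not part of this file): writes add
 * to the counter and reads return/reset it, unless the eventfd was created
 * with EFD_SEMAPHORE, in which case each read returns 1 and decrements the
 * counter by 1 (see eventfd_ctx_do_read() above).
 *
 *	uint64_t v = 3;
 *
 *	write(efd, &v, sizeof(v));	// count += 3
 *	write(efd, &v, sizeof(v));	// count == 6
 *	read(efd, &v, sizeof(v));	// default: v == 6, count reset to 0
 *					// EFD_SEMAPHORE: v == 1, count == 5
 */
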
#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
        struct eventfd_ctx *ctx = f->private_data;

        spin_lock_irq(&ctx->wqh.lock);
        seq_printf(m, "eventfd-count: %16llx\n",
                   (unsigned long long)ctx->count);
        spin_unlock_irq(&ctx->wqh.lock);
        seq_printf(m, "eventfd-id: %d\n", ctx->id);
}
#endif
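
/*
 * The two seq_printf() calls above append eventfd-specific lines to the
 * generic fdinfo output, so a cat of /proc/<pid>/fdinfo/<fd> for an
 * eventfd would end with something like the following (values and exact
 * spacing hypothetical; the count is printed in hex):
 *
 *	eventfd-count:                0
 *	eventfd-id: 9
 */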

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = eventfd_show_fdinfo,
#endif
        .release        = eventfd_release,
        .poll           = eventfd_poll,
        .read_iter      = eventfd_read,
        .write          = eventfd_write,
        .llseek         = noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference of an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF  : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
        struct file *file;

        file = fget(fd);
        if (!file)
                return ERR_PTR(-EBADF);
        if (file->f_op != &eventfd_fops) {
                fput(file);
                return ERR_PTR(-EINVAL);
        }

        return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
        struct eventfd_ctx *ctx;
        struct fd f = fdget(fd);
        if (!f.file)
                return ERR_PTR(-EBADF);
        ctx = eventfd_ctx_fileget(f.file);
        fdput(f);
        return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);

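/*
 * Illustrative sketch only (not part of this file): drivers typically
 * receive an eventfd file descriptor from userspace (e.g. via an ioctl)
 * and convert it into a long-lived context reference here. struct my_dev
 * and my_dev_set_trigger() are hypothetical names.
 *
 *	int my_dev_set_trigger(struct my_dev *dev, int fd)
 *	{
 *		struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);
 *
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *		dev->trigger = ctx;	// dropped later via eventfd_ctx_put()
 *		return 0;
 *	}
 */
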
/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL : The @file is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
        struct eventfd_ctx *ctx;

        if (file->f_op != &eventfd_fops)
                return ERR_PTR(-EINVAL);

        ctx = file->private_data;
        kref_get(&ctx->kref);
        return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
        struct eventfd_ctx *ctx;
        struct file *file;
        int fd;

        /* Check the EFD_* constants for consistency.  */
        BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
        BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

        if (flags & ~EFD_FLAGS_SET)
                return -EINVAL;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        kref_init(&ctx->kref);
        init_waitqueue_head(&ctx->wqh);
        ctx->count = count;
        ctx->flags = flags;
        ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

        flags &= EFD_SHARED_FCNTL_FLAGS;
        flags |= O_RDWR;
        fd = get_unused_fd_flags(flags);
        if (fd < 0)
                goto err;

        file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
        if (IS_ERR(file)) {
                put_unused_fd(fd);
                fd = PTR_ERR(file);
                goto err;
        }

        file->f_mode |= FMODE_NOWAIT;
        fd_install(fd, file);
        return fd;
err:
        eventfd_free_ctx(ctx);
        return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
        return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
        return do_eventfd(count, 0);
}
Davide Libenzibcd0b232009-03-31 15:24:18 -0700458