Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | #ifndef _LINUX_POLL_H |
| 3 | #define _LINUX_POLL_H |
| 4 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5 | |
| 6 | #include <linux/compiler.h> |
Alexey Dobriyan | a99bbaf | 2009-10-04 16:11:37 +0400 | [diff] [blame] | 7 | #include <linux/ktime.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8 | #include <linux/wait.h> |
| 9 | #include <linux/string.h> |
Al Viro | f23f6e0 | 2006-10-20 15:17:02 -0400 | [diff] [blame] | 10 | #include <linux/fs.h> |
Dave Young | 9ff9933 | 2010-03-10 15:24:10 -0800 | [diff] [blame] | 11 | #include <linux/sysctl.h> |
Linus Torvalds | 7c0f6ba | 2016-12-24 11:46:01 -0800 | [diff] [blame] | 12 | #include <linux/uaccess.h> |
David Howells | 607ca46 | 2012-10-13 10:46:48 +0100 | [diff] [blame] | 13 | #include <uapi/linux/poll.h> |
Al Viro | e78cd95 | 2018-02-01 11:01:35 -0500 | [diff] [blame] | 14 | #include <uapi/linux/eventpoll.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 | |
extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#ifdef __clang__
/* NOTE(review): clang gets a smaller budget — presumably it needs more stack
 * for the same code than gcc does; confirm against the commit adding this. */
#define MAX_STACK_ALLOC 768
#else
#define MAX_STACK_ALLOC 832
#endif
/* Portion of the budget reserved for the select/poll frontends themselves. */
#define FRONTEND_STACK_ALLOC	256
#define SELECT_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC	FRONTEND_STACK_ALLOC
/* What remains of the budget is spent on inline poll_table_entry slots. */
#define WQUEUES_STACK_ALLOC	(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES	(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))

/* Mask reported when a file has no ->poll method (see vfs_poll() below). */
#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
Alexey Dobriyan | dd23aae | 2007-09-11 15:23:55 -0700 | [diff] [blame] | 31 | |
struct poll_table_struct;

/*
 * structures and helpers for f_op->poll implementations
 */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);

/*
 * Do not touch the structure directly, use the access functions
 * poll_does_not_wait() and poll_requested_events() instead.
 */
typedef struct poll_table_struct {
	poll_queue_proc _qproc;	/* queueing callback; NULL means the caller
				 * will not wait (see poll_does_not_wait()) */
	__poll_t _key;		/* events the caller polls for
				 * (see poll_requested_events()) */
} poll_table;
| 47 | |
| 48 | static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) |
| 49 | { |
Hans Verkuil | 626cf23 | 2012-03-23 15:02:27 -0700 | [diff] [blame] | 50 | if (p && p->_qproc && wait_address) |
| 51 | p->_qproc(filp, wait_address, p); |
| 52 | } |
| 53 | |
| 54 | /* |
| 55 | * Return true if it is guaranteed that poll will not wait. This is the case |
| 56 | * if the poll() of another file descriptor in the set got an event, so there |
| 57 | * is no need for waiting. |
| 58 | */ |
| 59 | static inline bool poll_does_not_wait(const poll_table *p) |
| 60 | { |
| 61 | return p == NULL || p->_qproc == NULL; |
| 62 | } |
| 63 | |
| 64 | /* |
| 65 | * Return the set of events that the application wants to poll for. |
| 66 | * This is useful for drivers that need to know whether a DMA transfer has |
| 67 | * to be started implicitly on poll(). You typically only want to do that |
| 68 | * if the application is actually polling for POLLIN and/or POLLOUT. |
| 69 | */ |
Al Viro | 0169943 | 2017-07-03 03:14:15 -0400 | [diff] [blame] | 70 | static inline __poll_t poll_requested_events(const poll_table *p) |
Hans Verkuil | 626cf23 | 2012-03-23 15:02:27 -0700 | [diff] [blame] | 71 | { |
Al Viro | 0169943 | 2017-07-03 03:14:15 -0400 | [diff] [blame] | 72 | return p ? p->_key : ~(__poll_t)0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 73 | } |
| 74 | |
| 75 | static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) |
| 76 | { |
Hans Verkuil | 626cf23 | 2012-03-23 15:02:27 -0700 | [diff] [blame] | 77 | pt->_qproc = qproc; |
Al Viro | 0169943 | 2017-07-03 03:14:15 -0400 | [diff] [blame] | 78 | pt->_key = ~(__poll_t)0; /* all events enabled */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 79 | } |
| 80 | |
Christoph Hellwig | 3deb642 | 2018-01-09 15:29:24 +0100 | [diff] [blame] | 81 | static inline bool file_can_poll(struct file *file) |
Christoph Hellwig | 9965ed17 | 2018-03-05 07:26:05 -0800 | [diff] [blame] | 82 | { |
Linus Torvalds | a11e1d4 | 2018-06-28 09:43:44 -0700 | [diff] [blame] | 83 | return file->f_op->poll; |
Christoph Hellwig | 9965ed17 | 2018-03-05 07:26:05 -0800 | [diff] [blame] | 84 | } |
| 85 | |
Linus Torvalds | a11e1d4 | 2018-06-28 09:43:44 -0700 | [diff] [blame] | 86 | static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt) |
| 87 | { |
| 88 | if (unlikely(!file->f_op->poll)) |
| 89 | return DEFAULT_POLLMASK; |
| 90 | return file->f_op->poll(file, pt); |
| 91 | } |
Christoph Hellwig | 3deb642 | 2018-01-09 15:29:24 +0100 | [diff] [blame] | 92 | |
/* One registration of the poller on a single waitqueue. */
struct poll_table_entry {
	struct file *filp;			/* file being polled */
	__poll_t key;				/* events of interest for this entry */
	wait_queue_entry_t wait;		/* NOTE(review): presumably linked onto
						 * *wait_address — confirm in fs/select.c */
	wait_queue_head_t *wait_address;	/* waitqueue this entry is queued on */
};
| 99 | |
/*
 * Structures and helpers for select/poll syscall
 */
struct poll_wqueues {
	poll_table pt;
	struct poll_table_page *table;	/* overflow storage once the inline
					 * entries below are exhausted (the
					 * "additional memory" noted above) */
	struct task_struct *polling_task;	/* task doing the select/poll */
	int triggered;
	int error;			/* first error encountered, if any */
	int inline_index;		/* next free slot in inline_entries */
	/* on-stack entries, sized to fit the WQUEUES_STACK_ALLOC budget */
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
};
| 112 | |
/* Set up / tear down a poll_wqueues (implemented in fs/select.c). */
extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
/* Estimate timer slack appropriate for the given timeout. */
extern u64 select_estimate_accuracy(struct timespec64 *tv);

/* Largest timeout, in seconds, representable as a signed 64-bit jiffies count. */
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)

extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time);

/* Fill *to with now + (sec, nsec); used to build select/poll deadlines. */
extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
				   long nsec);
Thomas Gleixner | b773ad4 | 2008-08-31 08:16:57 -0700 | [diff] [blame] | 124 | |
/*
 * Relocate the bit selected by the single-bit mask @from in @v to the bit
 * position of the single-bit mask @to: when the masks differ, their ratio
 * is a power of two, so the multiply/divide is a shift.  When from == to
 * (the common case: EPOLL* equals POLL* on most architectures) the whole
 * expression folds to (v & from).
 */
#define __MAP(v, from, to) \
	(from < to ? (v & from) * (to/from) : (v & from) / (from/to))

/* Translate an EPOLL* event mask into the userspace POLL* bit layout. */
static inline __u16 mangle_poll(__poll_t val)
{
	__u16 v = (__force __u16)val;
#define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X)
	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
		M(HUP) | M(RDHUP) | M(MSG);
#undef M
}

/* Translate a userspace POLL* event mask into the EPOLL* bit layout. */
static inline __poll_t demangle_poll(u16 val)
{
#define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X)
	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
		M(HUP) | M(RDHUP) | M(MSG);
#undef M
}
#undef __MAP
| 147 | |
| 148 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 149 | #endif /* _LINUX_POLL_H */ |