Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef _LINUX_POLL_H |
| 2 | #define _LINUX_POLL_H |
| 3 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4 | |
| 5 | #include <linux/compiler.h> |
Alexey Dobriyan | a99bbaf | 2009-10-04 16:11:37 +0400 | [diff] [blame] | 6 | #include <linux/ktime.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7 | #include <linux/wait.h> |
| 8 | #include <linux/string.h> |
Al Viro | f23f6e0 | 2006-10-20 15:17:02 -0400 | [diff] [blame] | 9 | #include <linux/fs.h> |
Dave Young | 9ff9933 | 2010-03-10 15:24:10 -0800 | [diff] [blame] | 10 | #include <linux/sysctl.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11 | #include <asm/uaccess.h> |
David Howells | 607ca46 | 2012-10-13 10:46:48 +0100 | [diff] [blame] | 12 | #include <uapi/linux/poll.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 | |
extern struct ctl_table epoll_table[]; /* for sysctl */

/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#ifdef __clang__
/* NOTE(review): clang apparently needs a smaller on-stack budget here than
 * gcc (presumably its generated frames are larger) — confirm against the
 * commit that introduced this split before changing either value. */
#define MAX_STACK_ALLOC 768
#else
#define MAX_STACK_ALLOC 832
#endif
/* Portion of MAX_STACK_ALLOC reserved for the select/poll frontends
 * themselves; the remainder holds inline poll_table_entry slots. */
#define FRONTEND_STACK_ALLOC	256
#define SELECT_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define WQUEUES_STACK_ALLOC	(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
/* How many poll_table_entry structs fit in the on-stack budget. */
#define N_INLINE_POLL_ENTRIES	(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
| 29 | |
struct poll_table_struct;

/*
 * structures and helpers for f_op->poll implementations
 */
/* Callback invoked by poll_wait() to enqueue the polling task on a driver's
 * wait queue head. */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
| 36 | |
/*
 * Do not touch the structure directly, use the access functions
 * poll_does_not_wait() and poll_requested_events() instead.
 */
typedef struct poll_table_struct {
	poll_queue_proc _qproc;	/* enqueue callback; NULL means caller will not wait */
	unsigned long _key;	/* event mask the caller is polling for (~0UL = all) */
} poll_table;
| 45 | |
| 46 | static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) |
| 47 | { |
Hans Verkuil | 626cf23 | 2012-03-23 15:02:27 -0700 | [diff] [blame] | 48 | if (p && p->_qproc && wait_address) |
| 49 | p->_qproc(filp, wait_address, p); |
| 50 | } |
| 51 | |
| 52 | /* |
| 53 | * Return true if it is guaranteed that poll will not wait. This is the case |
| 54 | * if the poll() of another file descriptor in the set got an event, so there |
| 55 | * is no need for waiting. |
| 56 | */ |
| 57 | static inline bool poll_does_not_wait(const poll_table *p) |
| 58 | { |
| 59 | return p == NULL || p->_qproc == NULL; |
| 60 | } |
| 61 | |
| 62 | /* |
| 63 | * Return the set of events that the application wants to poll for. |
| 64 | * This is useful for drivers that need to know whether a DMA transfer has |
| 65 | * to be started implicitly on poll(). You typically only want to do that |
| 66 | * if the application is actually polling for POLLIN and/or POLLOUT. |
| 67 | */ |
| 68 | static inline unsigned long poll_requested_events(const poll_table *p) |
| 69 | { |
| 70 | return p ? p->_key : ~0UL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 71 | } |
| 72 | |
| 73 | static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) |
| 74 | { |
Hans Verkuil | 626cf23 | 2012-03-23 15:02:27 -0700 | [diff] [blame] | 75 | pt->_qproc = qproc; |
| 76 | pt->_key = ~0UL; /* all events enabled */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 77 | } |
| 78 | |
/* One registration of a polling task on a single wait queue. */
struct poll_table_entry {
	struct file *filp;		/* file this entry was queued for */
	unsigned long key;		/* event mask — presumably mirrors poll_table._key at queue time; confirm in fs/select.c */
	wait_queue_t wait;		/* our entry on the driver's wait queue */
	wait_queue_head_t *wait_address;	/* the queue head @wait is linked on */
};
| 85 | |
/*
 * Structures and helpers for select/poll syscall
 */
struct poll_wqueues {
	poll_table pt;
	struct poll_table_page *table;	/* overflow pages once inline_entries is full — confirm in fs/select.c */
	struct task_struct *polling_task;	/* task doing the select/poll */
	int triggered;			/* wakeup occurred; see poll_schedule_timeout() */
	int error;			/* first error encountered while queueing */
	int inline_index;		/* next free slot in inline_entries[] */
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];	/* on-stack entries, avoids allocation for small fd sets */
};
| 98 | |
/* Set up / tear down a poll_wqueues for one select/poll invocation. */
extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
/* Sleep until a queued wakeup, the timeout @expires, or a signal. */
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
				 ktime_t *expires, unsigned long slack);
/* Estimate timer slack appropriate for the timeout in @tv. */
extern u64 select_estimate_accuracy(struct timespec64 *tv);
Tejun Heo | 5f820f6 | 2009-01-06 14:40:59 -0800 | [diff] [blame] | 105 | |
| 106 | static inline int poll_schedule(struct poll_wqueues *pwq, int state) |
| 107 | { |
| 108 | return poll_schedule_timeout(pwq, state, NULL, 0); |
| 109 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 110 | |
/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;			/* requested: read / write / exception bitmaps */
	unsigned long *res_in, *res_out, *res_ex;	/* results reported back to userspace */
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG	(8*sizeof(long))
#define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)	/* round up */
#define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))
| 126 | |
| 127 | /* |
| 128 | * We do a VERIFY_WRITE here even though we are only reading this time: |
| 129 | * we'll write to it eventually.. |
| 130 | * |
| 131 | * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned. |
| 132 | */ |
| 133 | static inline |
| 134 | int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset) |
| 135 | { |
| 136 | nr = FDS_BYTES(nr); |
| 137 | if (ufdset) |
| 138 | return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0; |
| 139 | |
| 140 | memset(fdset, 0, nr); |
| 141 | return 0; |
| 142 | } |
| 143 | |
| 144 | static inline unsigned long __must_check |
| 145 | set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset) |
| 146 | { |
| 147 | if (ufdset) |
| 148 | return __copy_to_user(ufdset, fdset, FDS_BYTES(nr)); |
| 149 | return 0; |
| 150 | } |
| 151 | |
/* Clear @nr bits worth of an in-kernel fd_set bitmap. */
static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	unsigned long len = FDS_BYTES(nr);

	memset(fdset, 0, len);
}
| 157 | |
/* Largest timeout (in seconds) representable as an s64 tick count. */
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)

/* Core select loop: poll every fd in @fds until an event or @end_time. */
extern int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time);
/* Core poll loop over the userspace pollfd array. */
extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
		       struct timespec64 *end_time);
/* Common entry for the select family of syscalls. */
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time);

/* Compute an absolute expiry @to from a relative (sec, nsec) timeout. */
extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
				   long nsec);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 169 | #endif /* _LINUX_POLL_H */ |