/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */
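/*
 * Worked example (illustrative numbers, derived from the code below): with
 * the default divfactor of 1000, a 10 ms timeout gets about 10 us of slack
 * and a 10 s timeout gets 10 ms; the MAX_SLACK cap of 100 ms kicks in once
 * the timeout exceeds roughly 100 s. A positively niced task uses divfactor
 * 200 (0.5%), so it reaches the cap already at about 20 s.
 */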

#define MAX_SLACK       (100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
        long slack;
        int divfactor = 1000;

        if (tv->tv_sec < 0)
                return 0;

        if (task_nice(current) > 0)
                divfactor = divfactor / 5;

        if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
                return MAX_SLACK;

        slack = tv->tv_nsec / divfactor;
        slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

        if (slack > MAX_SLACK)
                return MAX_SLACK;

        return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
        u64 ret;
        struct timespec64 now;

        /*
         * Realtime tasks get a slack of 0 for obvious reasons.
         */

        if (rt_task(current))
                return 0;

        ktime_get_ts64(&now);
        now = timespec64_sub(*tv, now);
        ret = __estimate_accuracy(&now);
        if (ret < current->timer_slack_ns)
                return current->timer_slack_ns;
        return ret;
}


struct poll_table_page {
        struct poll_table_page * next;
        struct poll_table_entry * entry;
        struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
        ((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
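/*
 * For reference, a driver's ->poll() method usually follows this pattern
 * (an illustrative sketch; foo_poll(), dev->read_wq and data_ready() are
 * made-up names, not part of this file):
 *
 *      static unsigned int foo_poll(struct file *file, poll_table *wait)
 *      {
 *              struct foo_dev *dev = file->private_data;
 *              unsigned int mask = 0;
 *
 *              poll_wait(file, &dev->read_wq, wait);
 *              if (data_ready(dev))
 *                      mask |= POLLIN | POLLRDNORM;
 *              return mask;
 *      }
 *
 * The poll_wait() call lands in __pollwait() below via poll_table->_qproc.
 */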
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
                       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
        init_poll_funcptr(&pwq->pt, __pollwait);
        pwq->polling_task = current;
        pwq->triggered = 0;
        pwq->error = 0;
        pwq->table = NULL;
        pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
        remove_wait_queue(entry->wait_address, &entry->wait);
        fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
        struct poll_table_page * p = pwq->table;
        int i;
        for (i = 0; i < pwq->inline_index; i++)
                free_poll_entry(pwq->inline_entries + i);
        while (p) {
                struct poll_table_entry * entry;
                struct poll_table_page *old;

                entry = p->entry;
                do {
                        entry--;
                        free_poll_entry(entry);
                } while (entry > p->entries);
                old = p;
                p = p->next;
                free_page((unsigned long) old);
        }
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
        struct poll_table_page *table = p->table;

        if (p->inline_index < N_INLINE_POLL_ENTRIES)
                return p->inline_entries + p->inline_index++;

        if (!table || POLL_TABLE_FULL(table)) {
                struct poll_table_page *new_table;

                new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
                if (!new_table) {
                        p->error = -ENOMEM;
                        return NULL;
                }
                new_table->entry = new_table->entries;
                new_table->next = table;
                p->table = new_table;
                table = new_table;
        }

        return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct poll_wqueues *pwq = wait->private;
        DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

        /*
         * Although this function is called under waitqueue lock, LOCK
         * doesn't imply write barrier and the users expect write
         * barrier semantics on wakeup functions.  The following
         * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
         * and is paired with smp_store_mb() in poll_schedule_timeout.
         */
        smp_wmb();
        pwq->triggered = 1;

        /*
         * Perform the default wake up operation using a dummy
         * waitqueue.
         *
         * TODO: This is hacky but there currently is no interface to
         * pass in @sync.  @sync is scheduled to be removed and once
         * that happens, wake_up_process() can be used directly.
         */
        return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct poll_table_entry *entry;

        entry = container_of(wait, struct poll_table_entry, wait);
        if (key && !((unsigned long)key & entry->key))
                return 0;
        return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
                                poll_table *p)
{
        struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
        struct poll_table_entry *entry = poll_get_entry(pwq);
        if (!entry)
                return;
        entry->filp = get_file(filp);
        entry->wait_address = wait_address;
        entry->key = p->_key;
        init_waitqueue_func_entry(&entry->wait, pollwake);
        entry->wait.private = pwq;
        add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
                          ktime_t *expires, unsigned long slack)
{
        int rc = -EINTR;

        set_current_state(state);
        if (!pwq->triggered)
                rc = freezable_schedule_hrtimeout_range(expires, slack,
                                                        HRTIMER_MODE_ABS);
        __set_current_state(TASK_RUNNING);

        /*
         * Prepare for the next iteration.
         *
         * The following smp_store_mb() serves two purposes.  First, it's
         * the counterpart rmb of the wmb in pollwake() such that data
         * written before wake up is always visible after wake up.
         * Second, the full barrier guarantees that triggered clearing
         * doesn't pass event check of the next iteration.  Note that
         * this problem doesn't exist for the first iteration as
         * add_wait_queue() has full barrier semantics.
         */
        smp_store_mb(pwq->triggered, 0);

        return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:         pointer to timespec64 variable for the final timeout
 * @sec:        seconds (from user space)
 * @nsec:       nanoseconds (from user space)
 *
 * Note: we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
        struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

        if (!timespec64_valid(&ts))
                return -EINVAL;

        /* Optimize for the zero timeout value here */
        if (!sec && !nsec) {
                to->tv_sec = to->tv_nsec = 0;
        } else {
                ktime_get_ts64(to);
                *to = timespec64_add_safe(*to, ts);
        }
        return 0;
}
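/*
 * Usage note (illustrative): poll_select_set_timeout(&end_time, 5, 0) arms
 * an absolute expiry five seconds from now on the monotonic clock, while a
 * 0/0 pair requests an immediate, non-blocking scan. The resulting
 * &end_time is what gets handed to do_select()/do_poll() below.
 */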

static int poll_select_copy_remaining(struct timespec64 *end_time,
                                      void __user *p,
                                      int timeval, int ret)
{
        struct timespec64 rts64;
        struct timespec rts;
        struct timeval rtv;

        if (!p)
                return ret;

        if (current->personality & STICKY_TIMEOUTS)
                goto sticky;

        /* No update for zero timeout */
        if (!end_time->tv_sec && !end_time->tv_nsec)
                return ret;

        ktime_get_ts64(&rts64);
        rts64 = timespec64_sub(*end_time, rts64);
        if (rts64.tv_sec < 0)
                rts64.tv_sec = rts64.tv_nsec = 0;

        rts = timespec64_to_timespec(rts64);

        if (timeval) {
                if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
                        memset(&rtv, 0, sizeof(rtv));
                rtv.tv_sec = rts64.tv_sec;
                rtv.tv_usec = rts64.tv_nsec / NSEC_PER_USEC;

                if (!copy_to_user(p, &rtv, sizeof(rtv)))
                        return ret;

        } else if (!copy_to_user(p, &rts, sizeof(rts)))
                return ret;

        /*
         * If an application puts its timeval in read-only memory, we
         * don't want the Linux-specific update to the timeval to
         * cause a fault after the select has completed
         * successfully. However, because we're not updating the
         * timeval, we can't restart the system call.
         */

sticky:
        if (ret == -ERESTARTNOHAND)
                ret = -EINTR;
        return ret;
}

#define FDS_IN(fds, n)          (fds->in + n)
#define FDS_OUT(fds, n)         (fds->out + n)
#define FDS_EX(fds, n)          (fds->ex + n)

#define BITS(fds, n)    (*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

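/*
 * Check that every descriptor set in the in/out/ex bitmaps refers to an
 * open file. Returns the highest such descriptor plus one (the effective
 * nfds for the scan), or -EBADF if a closed descriptor was passed in.
 */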
static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
        unsigned long *open_fds;
        unsigned long set;
        int max;
        struct fdtable *fdt;

        /* handle the last incomplete long-word first */
        set = ~(~0UL << (n & (BITS_PER_LONG-1)));
        n /= BITS_PER_LONG;
        fdt = files_fdtable(current->files);
        open_fds = fdt->open_fds + n;
        max = 0;
        if (set) {
                set &= BITS(fds, n);
                if (set) {
                        if (!(set & ~*open_fds))
                                goto get_max;
                        return -EBADF;
                }
        }
        while (n) {
                open_fds--;
                n--;
                set = BITS(fds, n);
                if (!set)
                        continue;
                if (set & ~*open_fds)
                        return -EBADF;
                if (max)
                        continue;
get_max:
                do {
                        max++;
                        set >>= 1;
                } while (set);
                max += n * BITS_PER_LONG;
        }

        return max;
}

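/*
 * Map select()'s three fd_set categories onto poll event masks: POLLERR
 * counts as both readable and writable, POLLHUP as readable, and only
 * POLLPRI as exceptional. This is why select() wakes readers and writers
 * on error conditions.
 */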
#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
                                unsigned long out, unsigned long bit,
                                unsigned int ll_flag)
{
        wait->_key = POLLEX_SET | ll_flag;
        if (in & bit)
                wait->_key |= POLLIN_SET;
        if (out & bit)
                wait->_key |= POLLOUT_SET;
}

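/*
 * The core of select(): repeatedly walk the three request bitmaps, poll
 * every marked descriptor and record ready bits in the res_* bitmaps. If
 * nothing is ready yet, sleep in poll_schedule_timeout() until one of the
 * registered waitqueues fires, a signal arrives or the timeout expires.
 */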
int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
        ktime_t expire, *to = NULL;
        struct poll_wqueues table;
        poll_table *wait;
        int retval, i, timed_out = 0;
        u64 slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_end = 0;

        rcu_read_lock();
        retval = max_select_fd(n, fds);
        rcu_read_unlock();

        if (retval < 0)
                return retval;
        n = retval;

        poll_initwait(&table);
        wait = &table.pt;
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                wait->_qproc = NULL;
                timed_out = 1;
        }

        if (end_time && !timed_out)
                slack = select_estimate_accuracy(end_time);

        retval = 0;
        for (;;) {
                unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
                bool can_busy_loop = false;

                inp = fds->in; outp = fds->out; exp = fds->ex;
                rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

                for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
                        unsigned long in, out, ex, all_bits, bit = 1, mask, j;
                        unsigned long res_in = 0, res_out = 0, res_ex = 0;

                        in = *inp++; out = *outp++; ex = *exp++;
                        all_bits = in | out | ex;
                        if (all_bits == 0) {
                                i += BITS_PER_LONG;
                                continue;
                        }

                        for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
                                struct fd f;
                                if (i >= n)
                                        break;
                                if (!(bit & all_bits))
                                        continue;
                                f = fdget(i);
                                if (f.file) {
                                        const struct file_operations *f_op;
                                        f_op = f.file->f_op;
                                        mask = DEFAULT_POLLMASK;
                                        if (f_op->poll) {
                                                wait_key_set(wait, in, out,
                                                             bit, busy_flag);
                                                mask = (*f_op->poll)(f.file, wait);
                                        }
                                        fdput(f);
                                        if ((mask & POLLIN_SET) && (in & bit)) {
                                                res_in |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLOUT_SET) && (out & bit)) {
                                                res_out |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLEX_SET) && (ex & bit)) {
                                                res_ex |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        /* got something, stop busy polling */
                                        if (retval) {
                                                can_busy_loop = false;
                                                busy_flag = 0;

                                        /*
                                         * only remember a returned
                                         * POLL_BUSY_LOOP if we asked for it
                                         */
                                        } else if (busy_flag & mask)
                                                can_busy_loop = true;

                                }
                        }
                        if (res_in)
                                *rinp = res_in;
                        if (res_out)
                                *routp = res_out;
                        if (res_ex)
                                *rexp = res_ex;
                        cond_resched();
                }
                wait->_qproc = NULL;
                if (retval || timed_out || signal_pending(current))
                        break;
                if (table.error) {
                        retval = table.error;
                        break;
                }

                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
                        if (!busy_end) {
                                busy_end = busy_loop_end_time();
                                continue;
                        }
                        if (!busy_loop_timeout(busy_end))
                                continue;
                }
                busy_flag = 0;

                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
                 * pointer to the expiry value.
                 */
                if (end_time && !to) {
                        expire = timespec64_to_ktime(*end_time);
                        to = &expire;
                }

                if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
                                           to, slack))
                        timed_out = 1;
        }

        poll_freewait(&table);

        return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
 */
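/*
 * core_sys_select() sizes and allocates the six bitmaps (in/out/ex plus
 * their result counterparts), on the stack when they are small and via
 * kmalloc()/vmalloc() otherwise, copies the user fd_sets in, runs
 * do_select() and copies the result bitmaps back out.
 */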
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
                           fd_set __user *exp, struct timespec64 *end_time)
{
        fd_set_bits fds;
        void *bits;
        int ret, max_fds;
        size_t size, alloc_size;
        struct fdtable *fdt;
        /* Allocate small arguments on the stack to save memory and be faster */
        long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

        ret = -EINVAL;
        if (n < 0)
                goto out_nofds;

        /* max_fds can increase, so grab it once to avoid race */
        rcu_read_lock();
        fdt = files_fdtable(current->files);
        max_fds = fdt->max_fds;
        rcu_read_unlock();
        if (n > max_fds)
                n = max_fds;

        /*
         * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
         * since we used fdset we need to allocate memory in units of
         * long-words.
         */
        size = FDS_BYTES(n);
        bits = stack_fds;
        if (size > sizeof(stack_fds) / 6) {
                /* Not enough space in on-stack array; must use kmalloc */
                ret = -ENOMEM;
                if (size > (SIZE_MAX / 6))
                        goto out_nofds;

                alloc_size = 6 * size;
                bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
                if (!bits && alloc_size > PAGE_SIZE)
                        bits = vmalloc(alloc_size);

                if (!bits)
                        goto out_nofds;
        }
        fds.in      = bits;
        fds.out     = bits +   size;
        fds.ex      = bits + 2*size;
        fds.res_in  = bits + 3*size;
        fds.res_out = bits + 4*size;
        fds.res_ex  = bits + 5*size;

        if ((ret = get_fd_set(n, inp, fds.in)) ||
            (ret = get_fd_set(n, outp, fds.out)) ||
            (ret = get_fd_set(n, exp, fds.ex)))
                goto out;
        zero_fd_set(n, fds.res_in);
        zero_fd_set(n, fds.res_out);
        zero_fd_set(n, fds.res_ex);

        ret = do_select(n, &fds, end_time);

        if (ret < 0)
                goto out;
        if (!ret) {
                ret = -ERESTARTNOHAND;
                if (signal_pending(current))
                        goto out;
                ret = 0;
        }

        if (set_fd_set(n, inp, fds.res_in) ||
            set_fd_set(n, outp, fds.res_out) ||
            set_fd_set(n, exp, fds.res_ex))
                ret = -EFAULT;

out:
        if (bits != stack_fds)
                kvfree(bits);
out_nofds:
        return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct timeval __user *, tvp)
{
        struct timespec64 end_time, *to = NULL;
        struct timeval tv;
        int ret;

        if (tvp) {
                if (copy_from_user(&tv, tvp, sizeof(tv)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to,
                                tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
                                (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
                        return -EINVAL;
        }

        ret = core_sys_select(n, inp, outp, exp, to);
        ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

        return ret;
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
                       fd_set __user *exp, struct timespec __user *tsp,
                       const sigset_t __user *sigmask, size_t sigsetsize)
{
        sigset_t ksigmask, sigsaved;
        struct timespec ts;
        struct timespec64 ts64, end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;
                ts64 = timespec_to_timespec64(ts);

                to = &end_time;
                if (poll_select_set_timeout(to, ts64.tv_sec, ts64.tv_nsec))
                        return -EINVAL;
        }

        if (sigmask) {
                /* XXX: Don't preclude handling different sized sigset_t's.  */
                if (sigsetsize != sizeof(sigset_t))
                        return -EINVAL;
                if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
                        return -EFAULT;

                sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }

        ret = core_sys_select(n, inp, outp, exp, to);
        ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

        if (ret == -ERESTARTNOHAND) {
                /*
                 * Don't restore the signal mask yet. Let do_signal() deliver
                 * the signal on the way back to userspace, before the signal
                 * mask is restored.
                 */
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                                        sizeof(sigsaved));
                        set_restore_sigmask();
                }
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
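/*
 * For illustration, the user-space view of that sixth argument is roughly
 * (this struct is a sketch used by C libraries such as glibc, not a type
 * defined by the kernel):
 *
 *      struct {
 *              const sigset_t *ss;     pointer to the signal mask
 *              size_t ss_len;          size of the mask in bytes
 *      };
 *
 * The __get_user() calls below read exactly those two fields.
 */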
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct timespec __user *, tsp,
                void __user *, sig)
{
        size_t sigsetsize = 0;
        sigset_t __user *up = NULL;

        if (sig) {
                if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
                    || __get_user(up, (sigset_t __user * __user *)sig)
                    || __get_user(sigsetsize,
                                (size_t __user *)(sig+sizeof(void *))))
                        return -EFAULT;
        }

        return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
        unsigned long n;
        fd_set __user *inp, *outp, *exp;
        struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
        struct poll_list *next;
        int len;
        struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
                                     bool *can_busy_poll,
                                     unsigned int busy_flag)
{
        unsigned int mask;
        int fd;

        mask = 0;
        fd = pollfd->fd;
        if (fd >= 0) {
                struct fd f = fdget(fd);
                mask = POLLNVAL;
                if (f.file) {
                        mask = DEFAULT_POLLMASK;
                        if (f.file->f_op->poll) {
                                pwait->_key = pollfd->events|POLLERR|POLLHUP;
                                pwait->_key |= busy_flag;
                                mask = f.file->f_op->poll(f.file, pwait);
                                if (mask & busy_flag)
                                        *can_busy_poll = true;
                        }
                        /* Mask out unneeded events. */
                        mask &= pollfd->events | POLLERR | POLLHUP;
                        fdput(f);
                }
        }
        pollfd->revents = mask;

        return mask;
}

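/*
 * Walk every pollfd in the chained poll_list blocks, calling do_pollfd()
 * on each. If nothing is ready, sleep in poll_schedule_timeout() until a
 * wakeup, a signal or the timeout, then scan the list again.
 */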
static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
                   struct timespec64 *end_time)
{
        poll_table* pt = &wait->pt;
        ktime_t expire, *to = NULL;
        int timed_out = 0, count = 0;
        u64 slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_end = 0;

        /* Optimise the no-wait case */
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                pt->_qproc = NULL;
                timed_out = 1;
        }

        if (end_time && !timed_out)
                slack = select_estimate_accuracy(end_time);

        for (;;) {
                struct poll_list *walk;
                bool can_busy_loop = false;

                for (walk = list; walk != NULL; walk = walk->next) {
                        struct pollfd * pfd, * pfd_end;

                        pfd = walk->entries;
                        pfd_end = pfd + walk->len;
                        for (; pfd != pfd_end; pfd++) {
                                /*
                                 * Fish for events. If we found one, record it
                                 * and kill poll_table->_qproc, so we don't
                                 * needlessly register any other waiters after
                                 * this. They'll get immediately deregistered
                                 * when we break out and return.
                                 */
                                if (do_pollfd(pfd, pt, &can_busy_loop,
                                              busy_flag)) {
                                        count++;
                                        pt->_qproc = NULL;
                                        /* found something, stop busy polling */
                                        busy_flag = 0;
                                        can_busy_loop = false;
                                }
                        }
                }
                /*
                 * All waiters have already been registered, so don't provide
                 * a poll_table->_qproc to them on the next loop iteration.
                 */
                pt->_qproc = NULL;
                if (!count) {
                        count = wait->error;
                        if (signal_pending(current))
                                count = -EINTR;
                }
                if (count || timed_out)
                        break;

                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
                        if (!busy_end) {
                                busy_end = busy_loop_end_time();
                                continue;
                        }
                        if (!busy_loop_timeout(busy_end))
                                continue;
                }
                busy_flag = 0;

                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
                 * pointer to the expiry value.
                 */
                if (end_time && !to) {
                        expire = timespec64_to_ktime(*end_time);
                        to = &expire;
                }

                if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
                        timed_out = 1;
        }
        return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
                        sizeof(struct pollfd))

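/*
 * Copy the user's pollfd array into a chain of poll_list blocks (the first
 * chunk lives on the stack, later chunks in kmalloc'ed blocks of at most a
 * page), run do_poll() and write the resulting revents back to user space.
 */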
int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
                struct timespec64 *end_time)
{
        struct poll_wqueues table;
        int err = -EFAULT, fdcount, len, size;
        /* Allocate small arguments on the stack to save memory and be
           faster - use long to make sure the buffer is aligned properly
           on 64 bit archs to avoid unaligned access */
        long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
        struct poll_list *const head = (struct poll_list *)stack_pps;
        struct poll_list *walk = head;
        unsigned long todo = nfds;

        if (nfds > rlimit(RLIMIT_NOFILE))
                return -EINVAL;

        len = min_t(unsigned int, nfds, N_STACK_PPS);
        for (;;) {
                walk->next = NULL;
                walk->len = len;
                if (!len)
                        break;

                if (copy_from_user(walk->entries, ufds + nfds-todo,
                                        sizeof(struct pollfd) * walk->len))
                        goto out_fds;

                todo -= walk->len;
                if (!todo)
                        break;

                len = min(todo, POLLFD_PER_PAGE);
                size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
                walk = walk->next = kmalloc(size, GFP_KERNEL);
                if (!walk) {
                        err = -ENOMEM;
                        goto out_fds;
                }
        }

        poll_initwait(&table);
        fdcount = do_poll(head, &table, end_time);
        poll_freewait(&table);

        for (walk = head; walk; walk = walk->next) {
                struct pollfd *fds = walk->entries;
                int j;

                for (j = 0; j < walk->len; j++, ufds++)
                        if (__put_user(fds[j].revents, &ufds->revents))
                                goto out_fds;
        }

        err = fdcount;
out_fds:
        walk = head->next;
        while (walk) {
                struct poll_list *pos = walk;
                walk = walk->next;
                kfree(pos);
        }

        return err;
}

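/*
 * poll() is restartable: when it is interrupted by a signal that has no
 * handler, sys_poll() stashes the arguments and the absolute timeout in
 * current->restart_block and arranges for do_restart_poll() to re-issue
 * the wait when the syscall is restarted.
 */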
static long do_restart_poll(struct restart_block *restart_block)
{
        struct pollfd __user *ufds = restart_block->poll.ufds;
        int nfds = restart_block->poll.nfds;
        struct timespec64 *to = NULL, end_time;
        int ret;

        if (restart_block->poll.has_timeout) {
                end_time.tv_sec = restart_block->poll.tv_sec;
                end_time.tv_nsec = restart_block->poll.tv_nsec;
                to = &end_time;
        }

        ret = do_sys_poll(ufds, nfds, to);

        if (ret == -EINTR)
                ret = set_restart_fn(restart_block, do_restart_poll);

        return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
                int, timeout_msecs)
{
        struct timespec64 end_time, *to = NULL;
        int ret;

        if (timeout_msecs >= 0) {
                to = &end_time;
                poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
                        NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
        }

        ret = do_sys_poll(ufds, nfds, to);

        if (ret == -EINTR) {
                struct restart_block *restart_block;

                restart_block = &current->restart_block;
                restart_block->poll.ufds = ufds;
                restart_block->poll.nfds = nfds;

                if (timeout_msecs >= 0) {
                        restart_block->poll.tv_sec = end_time.tv_sec;
                        restart_block->poll.tv_nsec = end_time.tv_nsec;
                        restart_block->poll.has_timeout = 1;
                } else
                        restart_block->poll.has_timeout = 0;

                ret = set_restart_fn(restart_block, do_restart_poll);
        }
        return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
                struct timespec __user *, tsp, const sigset_t __user *, sigmask,
                size_t, sigsetsize)
{
        sigset_t ksigmask, sigsaved;
        struct timespec ts;
        struct timespec64 end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        if (sigmask) {
                /* XXX: Don't preclude handling different sized sigset_t's.  */
                if (sigsetsize != sizeof(sigset_t))
                        return -EINVAL;
                if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
                        return -EFAULT;

                sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }

        ret = do_sys_poll(ufds, nfds, to);

        /* We can restart this syscall, usually */
        if (ret == -EINTR) {
                /*
                 * Don't restore the signal mask yet. Let do_signal() deliver
                 * the signal on the way back to userspace, before the signal
                 * mask is restored.
                 */
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                                sizeof(sigsaved));
                        set_restore_sigmask();
                }
                ret = -ERESTARTNOHAND;
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

        return ret;
}