/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <net/ll_poll.h>

#include <asm/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */
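/*
 * For example, a 10 ms timeout from a normal task yields 10 us of
 * slack (0.1%), a 2 s timeout from a "nice" task yields 10 ms (0.5%),
 * and anything beyond that is clamped to MAX_SLACK below.
 */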

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

long select_estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait() do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
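/*
 * For reference, a driver's ->poll() method typically looks like the
 * following sketch (foo_* and data_ready() are made-up names, not part
 * of this file):
 *
 *	static unsigned int foo_poll(struct file *file, poll_table *wait)
 *	{
 *		struct foo_device *dev = file->private_data;
 *		unsigned int mask = 0;
 *
 *		poll_wait(file, &dev->read_wq, wait);
 *		if (data_ready(dev))
 *			mask |= POLLIN | POLLRDNORM;
 *		return mask;
 *	}
 *
 * The poll_wait() call lands in __pollwait() below, which takes a
 * reference on the file and queues a pollwake() entry on dev->read_wq.
 */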
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

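/*
 * Hand out the next free poll_table_entry: the small inline array in
 * struct poll_wqueues is used first, then page-sized poll_table_pages
 * are allocated on demand.
 */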
static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

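/*
 * Wake-up callback installed on every waited-on queue: ignore wakeups
 * whose key doesn't intersect the events this entry asked for,
 * otherwise mark the table as triggered and wake the polling task.
 */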
static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to: pointer to timespec variable for the final timeout
 * @sec: seconds (from user space)
 * @nsec: nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here.  That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized.  Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}
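/*
 * The syscall entry points below pass raw user-supplied values straight
 * in: sys_select() converts a timeval's microseconds to nanoseconds,
 * and sys_poll() splits its millisecond timeout into seconds and
 * nanoseconds before calling this helper.
 */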

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

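/*
 * Scan the three input bitmaps and return one more than the highest fd
 * the caller is watching, or -EBADF if any watched fd is not actually
 * open in the current files table.
 */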
static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last in-complete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

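/*
 * Build the event mask we will ask ->poll() about for this fd:
 * exceptional conditions (and the busy-poll flag) are always included,
 * the read/write sets only if the corresponding input bit is set.
 */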
static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				unsigned int ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

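/*
 * The core select loop: scan every watched fd, calling its ->poll()
 * method, and collect the ready bits into the result bitmaps.  If
 * nothing is ready yet, sleep in poll_schedule_timeout() until a
 * pollwake() callback, a signal, or the timeout wakes us up, then
 * rescan.
 */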
int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, busy_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}
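/*
 * Typical userspace usage of the syscall above (a sketch, not part of
 * this file; "sock" stands for any open descriptor):
 *
 *	fd_set rfds;
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(sock, &rfds);
 *	int ready = select(sock + 1, &rfds, NULL, NULL, &tv);
 *
 *	if (ready > 0 && FD_ISSET(sock, &rfds))
 *		... sock is readable ...
 *
 * Unless the task has STICKY_TIMEOUTS in its personality, tv is
 * rewritten with the time remaining by poll_select_copy_remaining()
 * above.
 */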

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
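/*
 * Viewed from userspace, the sixth argument then points at something
 * like (a sketch; the field names are illustrative only):
 *
 *	struct {
 *		const sigset_t *ss;
 *		size_t ss_len;
 *	};
 *
 * The __get_user() calls below read the pointer and the size back out
 * and pass them on to do_pselect().
 */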
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     unsigned int busy_flag)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op && f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}

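/*
 * Walk the whole poll_list chain repeatedly, polling every entry via
 * do_pollfd(), until at least one event is found, the timeout expires,
 * or a signal is pending; between passes we sleep in
 * poll_schedule_timeout().
 */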
static int do_poll(unsigned int nfds,  struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

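/*
 * Copy the user's pollfd array into a chain of poll_list chunks (the
 * first lives on the stack, the rest are allocated one page at a time),
 * run do_poll() on it, and copy the resulting revents back to
 * userspace.
 */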
int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
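/*
 * Typical userspace usage of the syscall above (a sketch, not part of
 * this file; "sock" stands for any open descriptor):
 *
 *	struct pollfd pfd = { .fd = sock, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN))
 *		... sock is readable ...
 */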

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}