// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome the nfds < 16390 descriptor limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK       (100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
        long slack;
        int divfactor = 1000;

        if (tv->tv_sec < 0)
                return 0;

        if (task_nice(current) > 0)
                divfactor = divfactor / 5;

        if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
                return MAX_SLACK;

        slack = tv->tv_nsec / divfactor;
        slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

        if (slack > MAX_SLACK)
                return MAX_SLACK;

        return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
        u64 ret;
        struct timespec64 now;

        /*
         * Realtime tasks get a slack of 0 for obvious reasons.
         */

        if (rt_task(current))
                return 0;

        ktime_get_ts64(&now);
        now = timespec64_sub(*tv, now);
        ret = __estimate_accuracy(&now);
        if (ret < current->timer_slack_ns)
                return current->timer_slack_ns;
        return ret;
}
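
/*
 * Worked example (illustrative, not part of the build): for a normal
 * task with a 2 s timeout the estimate above is 0.1% of 2 s, i.e. 2 ms
 * of slack; a "nice" task gets 0.5%, i.e. 10 ms:
 *
 *      struct timespec64 tv = { .tv_sec = 2, .tv_nsec = 0 };
 *      long slack = __estimate_accuracy(&tv);  // 2 * (NSEC_PER_SEC/1000) = 2 ms
 *
 * Timeouts of 100 s or more hit the MAX_SLACK cap, and
 * select_estimate_accuracy() never goes below the per-task
 * current->timer_slack_ns floor.
 */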



struct poll_table_page {
        struct poll_table_page * next;
        struct poll_table_entry * entry;
        struct poll_table_entry entries[];
};

#define POLL_TABLE_FULL(table) \
        ((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
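
/*
 * For reference, the other side of this contract is a driver's ->poll
 * method.  A minimal sketch (hypothetical "mydev" device, not part of
 * this file) that cooperates with the machinery below:
 *
 *      static __poll_t mydev_poll(struct file *file, poll_table *wait)
 *      {
 *              struct mydev *dev = file->private_data;
 *              __poll_t mask = 0;
 *
 *              // Always register; poll_wait() is a no-op when
 *              // wait->_qproc is NULL (see do_select()/do_poll()).
 *              poll_wait(file, &dev->read_wq, wait);
 *              if (mydev_data_ready(dev))      // hypothetical helper
 *                      mask |= EPOLLIN | EPOLLRDNORM;
 *              return mask;
 *      }
 *
 * The select/poll side is then woken through pollwake() whenever the
 * driver calls wake_up(&dev->read_wq).
 */
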
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
                       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
        init_poll_funcptr(&pwq->pt, __pollwait);
        pwq->polling_task = current;
        pwq->triggered = 0;
        pwq->error = 0;
        pwq->table = NULL;
        pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
        remove_wait_queue(entry->wait_address, &entry->wait);
        fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
        struct poll_table_page * p = pwq->table;
        int i;
        for (i = 0; i < pwq->inline_index; i++)
                free_poll_entry(pwq->inline_entries + i);
        while (p) {
                struct poll_table_entry * entry;
                struct poll_table_page *old;

                entry = p->entry;
                do {
                        entry--;
                        free_poll_entry(entry);
                } while (entry > p->entries);
                old = p;
                p = p->next;
                free_page((unsigned long) old);
        }
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
        struct poll_table_page *table = p->table;

        if (p->inline_index < N_INLINE_POLL_ENTRIES)
                return p->inline_entries + p->inline_index++;

        if (!table || POLL_TABLE_FULL(table)) {
                struct poll_table_page *new_table;

                new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
                if (!new_table) {
                        p->error = -ENOMEM;
                        return NULL;
                }
                new_table->entry = new_table->entries;
                new_table->next = table;
                p->table = new_table;
                table = new_table;
        }

        return table->entry++;
}

static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
        struct poll_wqueues *pwq = wait->private;
        DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

        /*
         * Although this function is called under waitqueue lock, LOCK
         * doesn't imply write barrier and the users expect write
         * barrier semantics on wakeup functions.  The following
         * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
         * and is paired with smp_store_mb() in poll_schedule_timeout.
         */
        smp_wmb();
        pwq->triggered = 1;

        /*
         * Perform the default wake up operation using a dummy
         * waitqueue.
         *
         * TODO: This is hacky but there currently is no interface to
         * pass in @sync.  @sync is scheduled to be removed and once
         * that happens, wake_up_process() can be used directly.
         */
        return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
        struct poll_table_entry *entry;

        entry = container_of(wait, struct poll_table_entry, wait);
        if (key && !(key_to_poll(key) & entry->key))
                return 0;
        return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
                                poll_table *p)
{
        struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
        struct poll_table_entry *entry = poll_get_entry(pwq);
        if (!entry)
                return;
        entry->filp = get_file(filp);
        entry->wait_address = wait_address;
        entry->key = p->_key;
        init_waitqueue_func_entry(&entry->wait, pollwake);
        entry->wait.private = pwq;
        add_wait_queue(wait_address, &entry->wait);
}

static int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
                          ktime_t *expires, unsigned long slack)
{
        int rc = -EINTR;

        set_current_state(state);
        if (!pwq->triggered)
                rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
        __set_current_state(TASK_RUNNING);

        /*
         * Prepare for the next iteration.
         *
         * The following smp_store_mb() serves two purposes.  First, it's
         * the counterpart rmb of the wmb in pollwake() such that data
         * written before wake up is always visible after wake up.
         * Second, the full barrier guarantees that triggered clearing
         * doesn't pass event check of the next iteration.  Note that
         * this problem doesn't exist for the first iteration as
         * add_wait_queue() has full barrier semantics.
         */
        smp_store_mb(pwq->triggered, 0);

        return rc;
}

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:         pointer to timespec64 variable for the final timeout
 * @sec:        seconds (from user space)
 * @nsec:       nanoseconds (from user space)
 *
 * Note: we do not use a timespec for the user space value here.  That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized.  Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
        struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

        if (!timespec64_valid(&ts))
                return -EINVAL;

        /* Optimize for the zero timeout value here */
        if (!sec && !nsec) {
                to->tv_sec = to->tv_nsec = 0;
        } else {
                ktime_get_ts64(to);
                *to = timespec64_add_safe(*to, ts);
        }
        return 0;
}
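
/*
 * Usage sketch (illustrative): converting a relative 2.5 s timeout from
 * user space into the absolute expiry the rest of this file works with:
 *
 *      struct timespec64 end_time;
 *
 *      if (poll_select_set_timeout(&end_time, 2, 500 * NSEC_PER_MSEC))
 *              return -EINVAL;         // nsec was not normalized
 *
 * end_time now holds monotonic "now" plus 2.5 s, saturated by
 * timespec64_add_safe() rather than overflowing.
 */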

enum poll_time_type {
        PT_TIMEVAL = 0,
        PT_OLD_TIMEVAL = 1,
        PT_TIMESPEC = 2,
        PT_OLD_TIMESPEC = 3,
};

static int poll_select_finish(struct timespec64 *end_time,
                              void __user *p,
                              enum poll_time_type pt_type, int ret)
{
        struct timespec64 rts;

        restore_saved_sigmask_unless(ret == -ERESTARTNOHAND);

        if (!p)
                return ret;

        if (current->personality & STICKY_TIMEOUTS)
                goto sticky;

        /* No update for zero timeout */
        if (!end_time->tv_sec && !end_time->tv_nsec)
                return ret;

        ktime_get_ts64(&rts);
        rts = timespec64_sub(*end_time, rts);
        if (rts.tv_sec < 0)
                rts.tv_sec = rts.tv_nsec = 0;


        switch (pt_type) {
        case PT_TIMEVAL:
                {
                        struct __kernel_old_timeval rtv;

                        if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
                                memset(&rtv, 0, sizeof(rtv));
                        rtv.tv_sec = rts.tv_sec;
                        rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
                        if (!copy_to_user(p, &rtv, sizeof(rtv)))
                                return ret;
                }
                break;
        case PT_OLD_TIMEVAL:
                {
                        struct old_timeval32 rtv;

                        rtv.tv_sec = rts.tv_sec;
                        rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
                        if (!copy_to_user(p, &rtv, sizeof(rtv)))
                                return ret;
                }
                break;
        case PT_TIMESPEC:
                if (!put_timespec64(&rts, p))
                        return ret;
                break;
        case PT_OLD_TIMESPEC:
                if (!put_old_timespec32(&rts, p))
                        return ret;
                break;
        default:
                BUG();
        }
        /*
         * If an application puts its timeval in read-only memory, we
         * don't want the Linux-specific update to the timeval to
         * cause a fault after the select has completed
         * successfully. However, because we're not updating the
         * timeval, we can't restart the system call.
         */

sticky:
        if (ret == -ERESTARTNOHAND)
                ret = -EINTR;
        return ret;
}
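
/*
 * The user-visible effect of the update above, as a userspace sketch
 * (illustrative only): on Linux, select() rewrites its timeout argument
 * to the time *not* slept, unless the task runs with the STICKY_TIMEOUTS
 * personality:
 *
 *      struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *      fd_set rfds;
 *
 *      FD_ZERO(&rfds);
 *      FD_SET(fd, &rfds);                      // fd is hypothetical
 *      if (select(fd + 1, &rfds, NULL, NULL, &tv) > 0)
 *              printf("ready with %ld.%06ld s left\n",
 *                     (long)tv.tv_sec, (long)tv.tv_usec);
 *
 * Portable code must not rely on tv afterwards; POSIX leaves its
 * contents unspecified.
 */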

/*
 * Scalable version of the fd_set.
 */

typedef struct {
        unsigned long *in, *out, *ex;
        unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG  (8*sizeof(long))
#define FDS_LONGS(nr)   (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr)   (FDS_LONGS(nr)*sizeof(long))
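
/*
 * Worked example: with BITS_PER_LONG == 64, FDS_LONGS(1024) is
 * (1024 + 63) / 64 = 16 longwords, so FDS_BYTES(1024) = 128 bytes per
 * bitmap; rounding up means a partial final word is still copied whole.
 */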

/*
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
        nr = FDS_BYTES(nr);
        if (ufdset)
                return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

        memset(fdset, 0, nr);
        return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
        if (ufdset)
                return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
        return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
        memset(fdset, 0, FDS_BYTES(nr));
}

#define FDS_IN(fds, n)          (fds->in + n)
#define FDS_OUT(fds, n)         (fds->out + n)
#define FDS_EX(fds, n)          (fds->ex + n)

#define BITS(fds, n)    (*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
        unsigned long *open_fds;
        unsigned long set;
        int max;
        struct fdtable *fdt;

        /* handle last incomplete long-word first */
        set = ~(~0UL << (n & (BITS_PER_LONG-1)));
        n /= BITS_PER_LONG;
        fdt = files_fdtable(current->files);
        open_fds = fdt->open_fds + n;
        max = 0;
        if (set) {
                set &= BITS(fds, n);
                if (set) {
                        if (!(set & ~*open_fds))
                                goto get_max;
                        return -EBADF;
                }
        }
        while (n) {
                open_fds--;
                n--;
                set = BITS(fds, n);
                if (!set)
                        continue;
                if (set & ~*open_fds)
                        return -EBADF;
                if (max)
                        continue;
get_max:
                do {
                        max++;
                        set >>= 1;
                } while (set);
                max += n * BITS_PER_LONG;
        }

        return max;
}

#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR)
#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR)
#define POLLEX_SET (EPOLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
                                unsigned long out, unsigned long bit,
                                __poll_t ll_flag)
{
        wait->_key = POLLEX_SET | ll_flag;
        if (in & bit)
                wait->_key |= POLLIN_SET;
        if (out & bit)
                wait->_key |= POLLOUT_SET;
}

static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
        ktime_t expire, *to = NULL;
        struct poll_wqueues table;
        poll_table *wait;
        int retval, i, timed_out = 0;
        u64 slack = 0;
        __poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_start = 0;

        rcu_read_lock();
        retval = max_select_fd(n, fds);
        rcu_read_unlock();

        if (retval < 0)
                return retval;
        n = retval;

        poll_initwait(&table);
        wait = &table.pt;
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                wait->_qproc = NULL;
                timed_out = 1;
        }

        if (end_time && !timed_out)
                slack = select_estimate_accuracy(end_time);

        retval = 0;
        for (;;) {
                unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
                bool can_busy_loop = false;

                inp = fds->in; outp = fds->out; exp = fds->ex;
                rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

                for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
                        unsigned long in, out, ex, all_bits, bit = 1, j;
                        unsigned long res_in = 0, res_out = 0, res_ex = 0;
                        __poll_t mask;

                        in = *inp++; out = *outp++; ex = *exp++;
                        all_bits = in | out | ex;
                        if (all_bits == 0) {
                                i += BITS_PER_LONG;
                                continue;
                        }

                        for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
                                struct fd f;
                                if (i >= n)
                                        break;
                                if (!(bit & all_bits))
                                        continue;
                                f = fdget(i);
                                if (f.file) {
                                        wait_key_set(wait, in, out, bit,
                                                     busy_flag);
                                        mask = vfs_poll(f.file, wait);

                                        fdput(f);
                                        if ((mask & POLLIN_SET) && (in & bit)) {
                                                res_in |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLOUT_SET) && (out & bit)) {
                                                res_out |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLEX_SET) && (ex & bit)) {
                                                res_ex |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        /* got something, stop busy polling */
                                        if (retval) {
                                                can_busy_loop = false;
                                                busy_flag = 0;

                                        /*
                                         * only remember a returned
                                         * POLL_BUSY_LOOP if we asked for it
                                         */
                                        } else if (busy_flag & mask)
                                                can_busy_loop = true;

                                }
                        }
                        if (res_in)
                                *rinp = res_in;
                        if (res_out)
                                *routp = res_out;
                        if (res_ex)
                                *rexp = res_ex;
                        cond_resched();
                }
                wait->_qproc = NULL;
                if (retval || timed_out || signal_pending(current))
                        break;
                if (table.error) {
                        retval = table.error;
                        break;
                }

                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
                        if (!busy_start) {
                                busy_start = busy_loop_current_time();
                                continue;
                        }
                        if (!busy_loop_timeout(busy_start))
                                continue;
                }
                busy_flag = 0;

                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
                 * pointer to the expiry value.
                 */
                if (end_time && !to) {
                        expire = timespec64_to_ktime(*end_time);
                        to = &expire;
                }

                if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
                                           to, slack))
                        timed_out = 1;
        }

        poll_freewait(&table);

        return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
                           fd_set __user *exp, struct timespec64 *end_time)
{
        fd_set_bits fds;
        void *bits;
        int ret, max_fds;
        size_t size, alloc_size;
        struct fdtable *fdt;
        /* Allocate small arguments on the stack to save memory and be faster */
        long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

        ret = -EINVAL;
        if (n < 0)
                goto out_nofds;

        /* max_fds can increase, so grab it once to avoid race */
        rcu_read_lock();
        fdt = files_fdtable(current->files);
        max_fds = fdt->max_fds;
        rcu_read_unlock();
        if (n > max_fds)
                n = max_fds;

        /*
         * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
         * since we used fdset we need to allocate memory in units of
         * long-words.
         */
        size = FDS_BYTES(n);
        bits = stack_fds;
        if (size > sizeof(stack_fds) / 6) {
                /* Not enough space in on-stack array; must use kmalloc */
                ret = -ENOMEM;
                if (size > (SIZE_MAX / 6))
                        goto out_nofds;

                alloc_size = 6 * size;
                bits = kvmalloc(alloc_size, GFP_KERNEL);
                if (!bits)
                        goto out_nofds;
        }
        fds.in      = bits;
        fds.out     = bits +   size;
        fds.ex      = bits + 2*size;
        fds.res_in  = bits + 3*size;
        fds.res_out = bits + 4*size;
        fds.res_ex  = bits + 5*size;

        if ((ret = get_fd_set(n, inp, fds.in)) ||
            (ret = get_fd_set(n, outp, fds.out)) ||
            (ret = get_fd_set(n, exp, fds.ex)))
                goto out;
        zero_fd_set(n, fds.res_in);
        zero_fd_set(n, fds.res_out);
        zero_fd_set(n, fds.res_ex);

        ret = do_select(n, &fds, end_time);

        if (ret < 0)
                goto out;
        if (!ret) {
                ret = -ERESTARTNOHAND;
                if (signal_pending(current))
                        goto out;
                ret = 0;
        }

        if (set_fd_set(n, inp, fds.res_in) ||
            set_fd_set(n, outp, fds.res_out) ||
            set_fd_set(n, exp, fds.res_ex))
                ret = -EFAULT;

out:
        if (bits != stack_fds)
                kvfree(bits);
out_nofds:
        return ret;
}

static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
                       fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
{
        struct timespec64 end_time, *to = NULL;
        struct __kernel_old_timeval tv;
        int ret;

        if (tvp) {
                if (copy_from_user(&tv, tvp, sizeof(tv)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to,
                                tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
                                (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
                        return -EINVAL;
        }

        ret = core_sys_select(n, inp, outp, exp, to);
        return poll_select_finish(&end_time, tvp, PT_TIMEVAL, ret);
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct __kernel_old_timeval __user *, tvp)
{
        return kern_select(n, inp, outp, exp, tvp);
}
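
/*
 * Userspace view of the syscall above (illustrative sketch only; fd 0
 * is just an example descriptor):
 *
 *      #include <stdio.h>
 *      #include <sys/select.h>
 *
 *      fd_set rfds;
 *      struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
 *      int ret;
 *
 *      FD_ZERO(&rfds);
 *      FD_SET(0, &rfds);                       // watch stdin
 *      ret = select(1, &rfds, NULL, NULL, &tv);
 *      if (ret > 0 && FD_ISSET(0, &rfds))
 *              printf("stdin is readable\n");
 *      else if (ret == 0)
 *              printf("timed out\n");
 *
 * Note that glibc may route this through pselect6() on architectures
 * without a native select entry point.
 */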

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
                       fd_set __user *exp, void __user *tsp,
                       const sigset_t __user *sigmask, size_t sigsetsize,
                       enum poll_time_type type)
{
        struct timespec64 ts, end_time, *to = NULL;
        int ret;

        if (tsp) {
                switch (type) {
                case PT_TIMESPEC:
                        if (get_timespec64(&ts, tsp))
                                return -EFAULT;
                        break;
                case PT_OLD_TIMESPEC:
                        if (get_old_timespec32(&ts, tsp))
                                return -EFAULT;
                        break;
                default:
                        BUG();
                }

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        ret = set_user_sigmask(sigmask, sigsetsize);
        if (ret)
                return ret;

        ret = core_sys_select(n, inp, outp, exp, to);
        return poll_select_finish(&end_time, tsp, type, ret);
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
struct sigset_argpack {
        sigset_t __user *p;
        size_t size;
};

static inline int get_sigset_argpack(struct sigset_argpack *to,
                                     struct sigset_argpack __user *from)
{
        // the path is hot enough for overhead of copy_from_user() to matter
        if (from) {
                if (!user_read_access_begin(from, sizeof(*from)))
                        return -EFAULT;
                unsafe_get_user(to->p, &from->p, Efault);
                unsafe_get_user(to->size, &from->size, Efault);
                user_read_access_end();
        }
        return 0;
Efault:
        user_access_end();
        return -EFAULT;
}

SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct __kernel_timespec __user *, tsp,
                void __user *, sig)
{
        struct sigset_argpack x = {NULL, 0};

        if (get_sigset_argpack(&x, sig))
                return -EFAULT;

        return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_TIMESPEC);
}
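
/*
 * What the sixth argument looks like when calling pselect6() directly
 * from userspace (illustrative sketch; glibc normally does this packing
 * for you):
 *
 *      struct {
 *              const sigset_t *ss;     // may be NULL
 *              size_t ss_len;          // kernel sigset size, e.g. 8 on x86-64
 *      } data = { &sigmask, 8 };
 *
 *      ret = syscall(__NR_pselect6, nfds, &rfds, NULL, NULL, &ts, &data);
 *
 * The layout must match struct sigset_argpack above; the size is
 * validated later by set_user_sigmask().
 */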

#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct old_timespec32 __user *, tsp,
                void __user *, sig)
{
        struct sigset_argpack x = {NULL, 0};

        if (get_sigset_argpack(&x, sig))
                return -EFAULT;

        return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_OLD_TIMESPEC);
}

#endif

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
        unsigned long n;
        fd_set __user *inp, *outp, *exp;
        struct __kernel_old_timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        return kern_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
        struct poll_list *next;
        int len;
        struct pollfd entries[];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
                                     bool *can_busy_poll,
                                     __poll_t busy_flag)
{
        int fd = pollfd->fd;
        __poll_t mask = 0, filter;
        struct fd f;

        if (fd < 0)
                goto out;
        mask = EPOLLNVAL;
        f = fdget(fd);
        if (!f.file)
                goto out;

        /* userland u16 ->events contains POLL... bitmap */
        filter = demangle_poll(pollfd->events) | EPOLLERR | EPOLLHUP;
        pwait->_key = filter | busy_flag;
        mask = vfs_poll(f.file, pwait);
        if (mask & busy_flag)
                *can_busy_poll = true;
        mask &= filter;         /* Mask out unneeded events. */
        fdput(f);

out:
        /* ... and so does ->revents */
        pollfd->revents = mangle_poll(mask);
        return mask;
}
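
/*
 * Two userspace-visible consequences of the above (illustrative): a
 * negative ->fd is skipped with revents == 0, so a slot can be disabled
 * in place, and EPOLLERR/EPOLLHUP are always added to the filter, so
 * they can be reported even when not asked for:
 *
 *      struct pollfd pfd = { .fd = sock, .events = POLLIN };  // sock is hypothetical
 *
 *      poll(&pfd, 1, -1);
 *      if (pfd.revents & POLLHUP)      // possible despite events == POLLIN
 *              pfd.fd = -1;            // ignore this slot from now on
 */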

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
                   struct timespec64 *end_time)
{
        poll_table* pt = &wait->pt;
        ktime_t expire, *to = NULL;
        int timed_out = 0, count = 0;
        u64 slack = 0;
        __poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_start = 0;

        /* Optimise the no-wait case */
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                pt->_qproc = NULL;
                timed_out = 1;
        }

        if (end_time && !timed_out)
                slack = select_estimate_accuracy(end_time);

        for (;;) {
                struct poll_list *walk;
                bool can_busy_loop = false;

                for (walk = list; walk != NULL; walk = walk->next) {
                        struct pollfd * pfd, * pfd_end;

                        pfd = walk->entries;
                        pfd_end = pfd + walk->len;
                        for (; pfd != pfd_end; pfd++) {
                                /*
                                 * Fish for events. If we found one, record it
                                 * and kill poll_table->_qproc, so we don't
                                 * needlessly register any other waiters after
                                 * this. They'll get immediately deregistered
                                 * when we break out and return.
                                 */
                                if (do_pollfd(pfd, pt, &can_busy_loop,
                                              busy_flag)) {
                                        count++;
                                        pt->_qproc = NULL;
                                        /* found something, stop busy polling */
                                        busy_flag = 0;
                                        can_busy_loop = false;
                                }
                        }
                }
                /*
                 * All waiters have already been registered, so don't provide
                 * a poll_table->_qproc to them on the next loop iteration.
                 */
                pt->_qproc = NULL;
                if (!count) {
                        count = wait->error;
                        if (signal_pending(current))
                                count = -ERESTARTNOHAND;
                }
                if (count || timed_out)
                        break;

                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
                        if (!busy_start) {
                                busy_start = busy_loop_current_time();
                                continue;
                        }
                        if (!busy_loop_timeout(busy_start))
                                continue;
                }
                busy_flag = 0;

                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
                 * pointer to the expiry value.
                 */
                if (end_time && !to) {
                        expire = timespec64_to_ktime(*end_time);
                        to = &expire;
                }

                if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
                        timed_out = 1;
        }
        return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
                        sizeof(struct pollfd))

static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
                struct timespec64 *end_time)
{
        struct poll_wqueues table;
        int err = -EFAULT, fdcount, len;
        /* Allocate small arguments on the stack to save memory and be
           faster - use long to make sure the buffer is aligned properly
           on 64 bit archs to avoid unaligned access */
        long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
        struct poll_list *const head = (struct poll_list *)stack_pps;
        struct poll_list *walk = head;
        unsigned long todo = nfds;

        if (nfds > rlimit(RLIMIT_NOFILE))
                return -EINVAL;

        len = min_t(unsigned int, nfds, N_STACK_PPS);
        for (;;) {
                walk->next = NULL;
                walk->len = len;
                if (!len)
                        break;

                if (copy_from_user(walk->entries, ufds + nfds-todo,
                                        sizeof(struct pollfd) * walk->len))
                        goto out_fds;

                todo -= walk->len;
                if (!todo)
                        break;

                len = min(todo, POLLFD_PER_PAGE);
                walk = walk->next = kmalloc(struct_size(walk, entries, len),
                                            GFP_KERNEL);
                if (!walk) {
                        err = -ENOMEM;
                        goto out_fds;
                }
        }

        poll_initwait(&table);
        fdcount = do_poll(head, &table, end_time);
        poll_freewait(&table);

        if (!user_write_access_begin(ufds, nfds * sizeof(*ufds)))
                goto out_fds;

        for (walk = head; walk; walk = walk->next) {
                struct pollfd *fds = walk->entries;
                int j;

                for (j = walk->len; j; fds++, ufds++, j--)
                        unsafe_put_user(fds->revents, &ufds->revents, Efault);
        }
        user_write_access_end();

        err = fdcount;
out_fds:
        walk = head->next;
        while (walk) {
                struct poll_list *pos = walk;
                walk = walk->next;
                kfree(pos);
        }

        return err;

Efault:
        user_write_access_end();
        err = -EFAULT;
        goto out_fds;
}

static long do_restart_poll(struct restart_block *restart_block)
{
        struct pollfd __user *ufds = restart_block->poll.ufds;
        int nfds = restart_block->poll.nfds;
        struct timespec64 *to = NULL, end_time;
        int ret;

        if (restart_block->poll.has_timeout) {
                end_time.tv_sec = restart_block->poll.tv_sec;
                end_time.tv_nsec = restart_block->poll.tv_nsec;
                to = &end_time;
        }

        ret = do_sys_poll(ufds, nfds, to);

        if (ret == -ERESTARTNOHAND)
                ret = set_restart_fn(restart_block, do_restart_poll);

        return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
                int, timeout_msecs)
{
        struct timespec64 end_time, *to = NULL;
        int ret;

        if (timeout_msecs >= 0) {
                to = &end_time;
                poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
                        NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
        }

        ret = do_sys_poll(ufds, nfds, to);

        if (ret == -ERESTARTNOHAND) {
                struct restart_block *restart_block;

                restart_block = &current->restart_block;
                restart_block->poll.ufds = ufds;
                restart_block->poll.nfds = nfds;

                if (timeout_msecs >= 0) {
                        restart_block->poll.tv_sec = end_time.tv_sec;
                        restart_block->poll.tv_nsec = end_time.tv_nsec;
                        restart_block->poll.has_timeout = 1;
                } else
                        restart_block->poll.has_timeout = 0;

                ret = set_restart_fn(restart_block, do_restart_poll);
        }
        return ret;
}
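
/*
 * Userspace view (illustrative sketch; sock1/sock2 are hypothetical
 * descriptors):
 *
 *      #include <poll.h>
 *
 *      struct pollfd pfds[2] = {
 *              { .fd = sock1, .events = POLLIN },
 *              { .fd = sock2, .events = POLLIN | POLLOUT },
 *      };
 *      int ready = poll(pfds, 2, 5000);        // 5000 ms; -1 blocks forever
 *
 *      if (ready > 0 && (pfds[0].revents & POLLIN))
 *              handle_read(sock1);             // hypothetical helper
 *
 * Unlike select(), poll() never rewrites its timeout argument; if the
 * call is interrupted, the remaining time is carried in the restart
 * block set up above rather than being visible to userspace.
 */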

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
                struct __kernel_timespec __user *, tsp, const sigset_t __user *, sigmask,
                size_t, sigsetsize)
{
        struct timespec64 ts, end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (get_timespec64(&ts, tsp))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        ret = set_user_sigmask(sigmask, sigsetsize);
        if (ret)
                return ret;

        ret = do_sys_poll(ufds, nfds, to);
        return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
}
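
/*
 * Userspace view (illustrative sketch): ppoll() swaps in a signal mask
 * for the duration of the wait, closing the race between unblocking a
 * signal and going to sleep:
 *
 *      sigset_t mask_chld;
 *      struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *      sigemptyset(&mask_chld);                // wait with SIGCHLD
 *      sigaddset(&mask_chld, SIGCHLD);         //  blocked, for example
 *      ret = ppoll(pfds, nfds, &ts, &mask_chld);
 *
 * The caller's original mask is restored by restore_saved_sigmask_unless()
 * in poll_select_finish(), or placed on the signal frame if a signal is
 * delivered first.
 */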

#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds,
                struct old_timespec32 __user *, tsp, const sigset_t __user *, sigmask,
                size_t, sigsetsize)
{
        struct timespec64 ts, end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (get_old_timespec32(&ts, tsp))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        ret = set_user_sigmask(sigmask, sigsetsize);
        if (ret)
                return ret;

        ret = do_sys_poll(ufds, nfds, to);
        return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
}
#endif

#ifdef CONFIG_COMPAT
#define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))

/*
 * Ooo, nasty.  We need here to frob 32-bit unsigned longs to
 * 64-bit unsigned longs.
 */
static
int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
                        unsigned long *fdset)
{
        if (ufdset) {
                return compat_get_bitmap(fdset, ufdset, nr);
        } else {
                zero_fd_set(nr, fdset);
                return 0;
        }
}

static
int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
                      unsigned long *fdset)
{
        if (!ufdset)
                return 0;
        return compat_put_bitmap(ufdset, fdset, nr);
}


/*
 * This is a virtual copy of sys_select from fs/select.c and probably
 * should be compared to it from time to time.
 */

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
 */
static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
        compat_ulong_t __user *outp, compat_ulong_t __user *exp,
        struct timespec64 *end_time)
{
        fd_set_bits fds;
        void *bits;
        int size, max_fds, ret = -EINVAL;
        struct fdtable *fdt;
        long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

        if (n < 0)
                goto out_nofds;

        /* max_fds can increase, so grab it once to avoid race */
        rcu_read_lock();
        fdt = files_fdtable(current->files);
        max_fds = fdt->max_fds;
        rcu_read_unlock();
        if (n > max_fds)
                n = max_fds;

        /*
         * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
         * since we used fdset we need to allocate memory in units of
         * long-words.
         */
        size = FDS_BYTES(n);
        bits = stack_fds;
        if (size > sizeof(stack_fds) / 6) {
                bits = kmalloc_array(6, size, GFP_KERNEL);
                ret = -ENOMEM;
                if (!bits)
                        goto out_nofds;
        }
        fds.in      = (unsigned long *)  bits;
        fds.out     = (unsigned long *) (bits +   size);
        fds.ex      = (unsigned long *) (bits + 2*size);
        fds.res_in  = (unsigned long *) (bits + 3*size);
        fds.res_out = (unsigned long *) (bits + 4*size);
        fds.res_ex  = (unsigned long *) (bits + 5*size);

        if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
            (ret = compat_get_fd_set(n, outp, fds.out)) ||
            (ret = compat_get_fd_set(n, exp, fds.ex)))
                goto out;
        zero_fd_set(n, fds.res_in);
        zero_fd_set(n, fds.res_out);
        zero_fd_set(n, fds.res_ex);

        ret = do_select(n, &fds, end_time);

        if (ret < 0)
                goto out;
        if (!ret) {
                ret = -ERESTARTNOHAND;
                if (signal_pending(current))
                        goto out;
                ret = 0;
        }

        if (compat_set_fd_set(n, inp, fds.res_in) ||
            compat_set_fd_set(n, outp, fds.res_out) ||
            compat_set_fd_set(n, exp, fds.res_ex))
                ret = -EFAULT;
out:
        if (bits != stack_fds)
                kfree(bits);
out_nofds:
        return ret;
}

static int do_compat_select(int n, compat_ulong_t __user *inp,
        compat_ulong_t __user *outp, compat_ulong_t __user *exp,
        struct old_timeval32 __user *tvp)
{
        struct timespec64 end_time, *to = NULL;
        struct old_timeval32 tv;
        int ret;

        if (tvp) {
                if (copy_from_user(&tv, tvp, sizeof(tv)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to,
                                tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
                                (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
                        return -EINVAL;
        }

        ret = compat_core_sys_select(n, inp, outp, exp, to);
        return poll_select_finish(&end_time, tvp, PT_OLD_TIMEVAL, ret);
}

COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
        compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
        struct old_timeval32 __user *, tvp)
{
        return do_compat_select(n, inp, outp, exp, tvp);
}

struct compat_sel_arg_struct {
        compat_ulong_t n;
        compat_uptr_t inp;
        compat_uptr_t outp;
        compat_uptr_t exp;
        compat_uptr_t tvp;
};

COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
{
        struct compat_sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        return do_compat_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
                                compat_ptr(a.exp), compat_ptr(a.tvp));
}

static long do_compat_pselect(int n, compat_ulong_t __user *inp,
        compat_ulong_t __user *outp, compat_ulong_t __user *exp,
        void __user *tsp, compat_sigset_t __user *sigmask,
        compat_size_t sigsetsize, enum poll_time_type type)
{
        struct timespec64 ts, end_time, *to = NULL;
        int ret;

        if (tsp) {
                switch (type) {
                case PT_OLD_TIMESPEC:
                        if (get_old_timespec32(&ts, tsp))
                                return -EFAULT;
                        break;
                case PT_TIMESPEC:
                        if (get_timespec64(&ts, tsp))
                                return -EFAULT;
                        break;
                default:
                        BUG();
                }

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        ret = set_compat_user_sigmask(sigmask, sigsetsize);
        if (ret)
                return ret;

        ret = compat_core_sys_select(n, inp, outp, exp, to);
        return poll_select_finish(&end_time, tsp, type, ret);
}

struct compat_sigset_argpack {
        compat_uptr_t p;
        compat_size_t size;
};
static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to,
                                            struct compat_sigset_argpack __user *from)
{
        if (from) {
                if (!user_read_access_begin(from, sizeof(*from)))
                        return -EFAULT;
                unsafe_get_user(to->p, &from->p, Efault);
                unsafe_get_user(to->size, &from->size, Efault);
                user_read_access_end();
        }
        return 0;
Efault:
        user_access_end();
        return -EFAULT;
}

COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
        compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
        struct __kernel_timespec __user *, tsp, void __user *, sig)
{
        struct compat_sigset_argpack x = {0, 0};

        if (get_compat_sigset_argpack(&x, sig))
                return -EFAULT;

        return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
                                 x.size, PT_TIMESPEC);
}

#if defined(CONFIG_COMPAT_32BIT_TIME)

COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp,
        compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
        struct old_timespec32 __user *, tsp, void __user *, sig)
{
        struct compat_sigset_argpack x = {0, 0};

        if (get_compat_sigset_argpack(&x, sig))
                return -EFAULT;

        return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
                                 x.size, PT_OLD_TIMESPEC);
}

#endif

#if defined(CONFIG_COMPAT_32BIT_TIME)
COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds,
        unsigned int, nfds, struct old_timespec32 __user *, tsp,
        const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
        struct timespec64 ts, end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (get_old_timespec32(&ts, tsp))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        ret = set_compat_user_sigmask(sigmask, sigsetsize);
        if (ret)
                return ret;

        ret = do_sys_poll(ufds, nfds, to);
        return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
}
#endif

/* New compat syscall for 64 bit time_t */
COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds,
        unsigned int, nfds, struct __kernel_timespec __user *, tsp,
        const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
        struct timespec64 ts, end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (get_timespec64(&ts, tsp))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        ret = set_compat_user_sigmask(sigmask, sigsetsize);
        if (ret)
                return ret;

        ret = do_sys_poll(ufds, nfds, to);
        return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
}

#endif