// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}
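
/*
 * Rough sketch of how this is reached (assuming the generic rlimit path):
 * a userspace
 *
 *	struct rlimit r = { .rlim_cur = 5, .rlim_max = 10 };
 *	setrlimit(RLIMIT_CPU, &r);
 *
 * ends up calling update_rlimit_cpu() with rlim_new == 5, which is turned
 * into nanoseconds and installed as a CPUCLOCK_PROF expiry above.
 */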

/*
 * Functions for validating access to tasks.
 */
static struct task_struct *lookup_task(const pid_t pid, bool thread)
{
	struct task_struct *p;

	if (!pid)
		return thread ? current : current->group_leader;

	p = find_task_by_vpid(pid);
	if (!p || p == current)
		return p;
	if (thread)
		return same_thread_group(p, current) ? p : NULL;
	if (p == current)
		return p;
	return has_group_leader_pid(p) ? p : NULL;
}

static struct task_struct *__get_task_for_clock(const clockid_t clock,
						bool getref)
{
	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
	const pid_t pid = CPUCLOCK_PID(clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
		return NULL;

	rcu_read_lock();
	p = lookup_task(pid, thread);
	if (p && getref)
		get_task_struct(p);
	rcu_read_unlock();
	return p;
}

static inline struct task_struct *get_task_for_clock(const clockid_t clock)
{
	return __get_task_for_clock(clock, true);
}

static inline int validate_clock_permissions(const clockid_t clock)
{
	return __get_task_for_clock(clock, false) ? 0 : -EINVAL;
}
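
/*
 * For reference: a CPU clockid packs its target into the low bits (see
 * <linux/posix-timers.h>). Bits 0-1 select PROF/VIRT/SCHED, bit 2 is set
 * for per-thread clocks, and the remaining upper bits hold the bitwise
 * complement of the PID (0 meaning "the caller"), roughly:
 *
 *	clockid_t clk = (~(clockid_t)pid << 3) | CPUCLOCK_SCHED;
 *
 * which is approximately what clock_getcpuclockid() hands back to
 * userspace.
 */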

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	int i;
	u64 delta, incr;

	if (!timer->it_interval)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it_interval;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
}
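
/*
 * Worked example of the doubling/halving above (hypothetical numbers):
 * with expires = 100, it_interval = 10 and now = 135, delta = 45. The
 * first loop grows incr to 40 (i = 2); the second loop then adds 40 to
 * expires (making it 140) and 1 << 2 = 4 to it_overrun, which matches the
 * four expirations at 100, 110, 120 and 130 that have already passed.
 */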

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = validate_clock_permissions(which_clock);

	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
{
	int error = validate_clock_permissions(clock);

	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	return error ? : -EPERM;
}

/*
 * Sample a per-thread clock for the given task. clkid is validated.
 */
static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
{
	u64 utime, stime;

	if (clkid == CPUCLOCK_SCHED)
		return task_sched_runtime(p);

	task_cputime(p, &utime, &stime);

	switch (clkid) {
	case CPUCLOCK_PROF:
		return utime + stime;
	case CPUCLOCK_VIRT:
		return utime;
	default:
		WARN_ON_ONCE(1);
	}
	return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
{
	times->utime = atomic64_read(&atomic_times->utime);
	times->stime = atomic64_read(&atomic_times->stime);
	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

/**
 * thread_group_sample_cputime - Sample cputime for a given task
 * @tsk:	Task for which cputime needs to be sampled
 * @times:	Storage for time samples
 *
 * Called from sys_getitimer() to calculate the expiry time of an active
 * timer. That means group cputime accounting is already active. Called
 * with task sighand lock held.
 *
 * Updates @times with an up-to-date sample of the thread group cputimes.
 */
void thread_group_sample_cputime(struct task_struct *tsk,
				 struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	WARN_ON_ONCE(!cputimer->running);

	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}

/**
 * thread_group_start_cputime - Start cputime and return a sample
 * @tsk:	Task for which cputime needs to be started
 * @times:	Storage for time samples
 *
 * The thread group cputime accounting is avoided when there are no POSIX
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
 *
 * Updates @times with an up-to-date sample of the thread group cputimes.
 */
static void
thread_group_start_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}

/*
 * Sample a process (thread group) clock for the given task clkid. If the
 * group's cputime accounting is already enabled, read the atomic
 * store. Otherwise a full update is required.  Task's sighand lock must be
 * held to protect the task traversal on a full update. clkid is already
 * validated.
 */
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
				  bool start)
{
	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
	struct task_cputime cputime;

	if (!READ_ONCE(cputimer->running)) {
		if (start)
			thread_group_start_cputime(p, &cputime);
		else
			thread_group_cputime(p, &cputime);
	} else {
		sample_cputime_atomic(&cputime, &cputimer->cputime_atomic);
	}

	switch (clkid) {
	case CPUCLOCK_PROF:
		return cputime.utime + cputime.stime;
	case CPUCLOCK_VIRT:
		return cputime.utime;
	case CPUCLOCK_SCHED:
		return cputime.sum_exec_runtime;
	default:
		WARN_ON_ONCE(1);
	}
	return 0;
}

static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
{
	const clockid_t clkid = CPUCLOCK_WHICH(clock);
	struct task_struct *tsk;
	u64 t;

	tsk = get_task_for_clock(clock);
	if (!tsk)
		return -EINVAL;

	if (CPUCLOCK_PERTHREAD(clock))
		t = cpu_clock_sample(clkid, tsk);
	else
		t = cpu_clock_sample_group(clkid, tsk, false);
	put_task_struct(tsk);

	*tp = ns_to_timespec64(t);
	return 0;
}
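
/*
 * Userspace sketch of how this getter is typically reached (illustrative
 * only):
 *
 *	clockid_t clk;
 *	struct timespec ts;
 *
 *	clock_getcpuclockid(pid, &clk);
 *	clock_gettime(clk, &ts);
 *
 * A dynamic CPU clockid like this dispatches through the clock_posix_cpu
 * k_clock and lands in posix_cpu_clock_get().
 */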

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	struct task_struct *p = get_task_for_clock(new_timer->it_clock);

	if (!p)
		return -EINVAL;

	new_timer->kclock = &clock_posix_cpu;
	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
	new_timer->it.cpu.task = p;
	return 0;
}
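
/*
 * Userspace sketch of creating such a timer (assuming a thread CPU clock):
 *
 *	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *				.sigev_signo  = SIGRTMIN };
 *	timer_t tid;
 *
 *	timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &tid);
 *
 * timer_create() validates the clockid through get_task_for_clock() above
 * and pins the target task until posix_cpu_timer_del() drops the
 * reference.
 */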

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	if (WARN_ON_ONCE(!p))
		return -EINVAL;

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers which are still armed when a thread exits. The
 * timers are only removed from the list. No other updates are done. The
 * corresponding posix timers are still accessible, but cannot be rearmed.
 *
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(u64 expires, u64 new_exp)
{
	return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		u64 exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp))
				cputime_expires->prof_exp = exp;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp))
				cputime_expires->virt_exp = exp;
			break;
		case CPUCLOCK_SCHED:
			if (expires_gt(cputime_expires->sched_exp, exp))
				cputime_expires->sched_exp = exp;
			break;
		}
		if (CPUCLOCK_PERTHREAD(timer->it_clock))
			tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
		else
			tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
	}
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (!timer->it_interval) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	u64 old_expires, new_expires, old_incr, val;
	struct task_struct *p = timer->it.cpu.task;
	struct sighand_struct *sighand;
	unsigned long flags;
	int ret;

	if (WARN_ON_ONCE(!p))
		return -EINVAL;

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer().
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */

	ret = 0;
	old_incr = timer->it_interval;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the old
	 * value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		val = cpu_clock_sample(clkid, p);
	else
		val = cpu_clock_sample_group(clkid, p, true);

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it_interval = timespec64_to_ktime(new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}
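
/*
 * Example of the relative vs. TIMER_ABSTIME handling above (hypothetical
 * numbers): with the task having consumed val = 2s of CPU time, a relative
 * it_value of 500ms becomes an absolute expiry of 2.5s on the CPU clock,
 * while the same value passed with TIMER_ABSTIME is already in the past
 * and makes cpu_timer_fire() run immediately.
 */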

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct task_struct *p = timer->it.cpu.task;
	u64 now;

	if (WARN_ON_ONCE(!p))
		return;

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ktime_to_timespec64(timer->it_interval);

	if (!timer->it.cpu.expires)
		return;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		now = cpu_clock_sample(clkid, p);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else {
			now = cpu_clock_sample_group(clkid, p, false);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}

static inline void check_dl_overrun(struct task_struct *tsk)
{
	if (tsk->dl.dl_overrun) {
		tsk->dl.dl_overrun = 0;
		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	struct list_head *timers = tsk->cpu_timers;
	u64 expires, stime, utime;
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	/*
	 * If cputime_expires is zero, then there are no active
	 * per thread CPU timers.
	 */
	if (task_cputime_zero(&tsk->cputime_expires))
		return;

	task_cputime(tsk, &utime, &stime);

	expires = check_timers_list(timers, firing, utime + stime);
	tsk_expires->prof_exp = expires;

	expires = check_timers_list(++timers, firing, utime);
	tsk_expires->virt_exp = expires;

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
	soft = task_rlimit(tsk, RLIMIT_RTTIME);
	if (soft != RLIM_INFINITY) {
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur =
					soft;
			}
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
	if (task_cputime_zero(tsk_expires))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running. This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    task_tgid(tsk), cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires))
		*expires = it->expires;
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	u64 utime, ptime, virt_expires, prof_expires;
	u64 sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * If cputimer is not running, then there are no active
	 * process-wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(tsk->signal->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals. Group accounting is active
	 * so the sample can be taken directly.
	 */
	sample_cputime_atomic(&cputime, &sig->cputimer.cputime_atomic);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = task_rlimit(tsk, RLIMIT_CPU);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
		u64 x;

		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = soft * NSEC_PER_SEC;
		if (!prof_expires || x < prof_expires)
			prof_expires = x;
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct task_struct *p = timer->it.cpu.task;
	struct sighand_struct *sighand;
	unsigned long flags;
	u64 now;

	if (WARN_ON_ONCE(!p))
		return;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		now = cpu_clock_sample(clkid, p);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			return;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			return;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/* If the process is dying, no need to rearm */
			goto unlock;
		}
		now = cpu_clock_sample_group(clkid, p, true);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer);
unlock:
	unlock_task_sighand(p, &flags);
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample;

		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/*
	 * Check if thread group timers expired when the cputimer is
	 * running and no other thread in the group is already checking
	 * for thread group cputimers. These fields are read without the
	 * sighand lock. However, this is fine because this is meant to
	 * be a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to check/handle timers.
	 *
	 * In the worst case scenario, if 'running' or 'checking_timer' gets
	 * set but the current thread doesn't see the change yet, we'll wait
	 * until the next thread in the group gets a scheduler interrupt to
	 * handle the timer. This isn't an issue in practice because these
	 * types of delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
		struct task_cputime group_sample;

		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	if (dl_task(tsk) && tsk->dl.dl_overrun)
		return 1;

	return 0;
}

/*
 * This is called from the timer interrupt handler. The irq handler has
 * already updated our counts. We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(void)
{
        struct task_struct *tsk = current;
        struct k_itimer *timer, *next;
        unsigned long flags;
        LIST_HEAD(firing);

        lockdep_assert_irqs_disabled();

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers. If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        if (!lock_task_sighand(tsk, &flags))
                return;
        /*
         * Here we take all the timers that are firing off the
         * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N] lists and put
         * them on the local firing list.
         */
        check_thread_timers(tsk, &firing);

        check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list. We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        unlock_task_sighand(tsk, &flags);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us. We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int cpu_firing;

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun. So don't generate an event.
                 */
                if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
}
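/*
 * Note on the firing protocol used above: check_thread_timers() and
 * check_process_timers() move expired timers onto the local firing list and
 * set it.cpu.firing while sighand->siglock is held. A concurrent deletion or
 * reset (handled earlier in this file) that observes the firing flag backs
 * off with TIMER_RETRY until the flag is cleared under timer->it_lock here;
 * a reset additionally marks the collision by setting the flag to -1, in
 * which case cpu_timer_fire() is skipped because the expiry has already been
 * reported as an overrun.
 */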

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           u64 *newval, u64 *oldval)
{
        u64 now;

        if (WARN_ON_ONCE(clock_idx >= CPUCLOCK_SCHED))
                return;

        now = cpu_clock_sample_group(clock_idx, tsk, true);

        if (oldval) {
                /*
                 * We are setting itimer. The *oldval is absolute and we
                 * update it to be relative; the *newval argument is relative
                 * and we update it to be absolute.
                 */
                if (*oldval) {
                        if (*oldval <= now) {
                                /* Just about to fire. */
                                *oldval = TICK_NSEC;
                        } else {
                                *oldval -= now;
                        }
                }

                if (!*newval)
                        return;
                *newval += now;
        }

        /*
         * Update the expiration cache if we are the earliest timer, or if
         * the RLIMIT_CPU limit expires earlier than the current prof_exp
         * CPU timer.
         */
        switch (clock_idx) {
        case CPUCLOCK_PROF:
                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
                        tsk->signal->cputime_expires.prof_exp = *newval;
                break;
        case CPUCLOCK_VIRT:
                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
                        tsk->signal->cputime_expires.virt_exp = *newval;
                break;
        }

        tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
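/*
 * Illustrative sketch (userspace, not part of this file): the classic
 * process-wide CPU itimers end up in set_process_cpu_timer() above, with
 * clock_idx CPUCLOCK_PROF for ITIMER_PROF and CPUCLOCK_VIRT for
 * ITIMER_VIRTUAL, called with sighand->siglock held.
 *
 *      struct itimerval it = {
 *              .it_value = { .tv_sec = 1 },    // fire after 1s of CPU time
 *      };
 *      setitimer(ITIMER_PROF, &it, NULL);      // SIGPROF when it expires
 */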

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            const struct timespec64 *rqtp)
{
        struct itimerspec64 it;
        struct k_itimer timer;
        u64 expires;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec64 zero_it;
                struct restart_block *restart;

                memset(&it, 0, sizeof(it));
                it.it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, &it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires == 0) {
                                /*
                                 * Our timer fired and was reset; the
                                 * deletion below cannot fail.
                                 */
                                posix_cpu_timer_del(&timer);
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                expires = timer.it.cpu.expires;
                error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
                if (!error) {
                        /*
                         * Timer is now unarmed, deletion cannot fail.
                         */
                        posix_cpu_timer_del(&timer);
                }
                spin_unlock_irq(&timer.it_lock);

                while (error == TIMER_RETRY) {
                        /*
                         * We need to handle the case when the timer was or is
                         * in the middle of firing. In other cases we have
                         * already freed the resources.
                         */
                        spin_lock_irq(&timer.it_lock);
                        error = posix_cpu_timer_del(&timer);
                        spin_unlock_irq(&timer.it_lock);
                }

                if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
                /*
                 * Report back to the user the time still remaining.
                 */
                restart = &current->restart_block;
                restart->nanosleep.expires = expires;
                if (restart->nanosleep.type != TT_NONE)
                        error = nanosleep_copyout(restart, &it.it_value);
        }

        return error;
}
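/*
 * When do_cpu_nanosleep() is interrupted by a signal it disarms the temporary
 * timer, saves the absolute expiry in restart->nanosleep.expires and, when
 * the caller supplied a remainder pointer (restart->nanosleep.type !=
 * TT_NONE), copies the remaining time back via nanosleep_copyout() before
 * returning -ERESTART_RESTARTBLOCK.
 */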

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                            const struct timespec64 *rqtp)
{
        struct restart_block *restart_block = &current->restart_block;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp);

        if (error == -ERESTART_RESTARTBLOCK) {

                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->nanosleep.clockid = which_clock;
        }
        return error;
}
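/*
 * Illustrative sketch (userspace, not part of this file): a relative sleep on
 * the process CPU-time clock is served by posix_cpu_nsleep() above. A
 * per-thread CPU clock naming the caller's own thread is rejected with
 * -EINVAL, since that clock cannot advance while the thread sleeps.
 *
 *      struct timespec ts = { .tv_sec = 2 };   // 2s of consumed CPU time
 *      clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, &ts);
 */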

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->nanosleep.clockid;
        struct timespec64 t;

        t = ns_to_timespec64(restart_block->nanosleep.expires);

        return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
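/*
 * The restart handler re-enters do_cpu_nanosleep() with TIMER_ABSTIME and the
 * absolute expiry saved earlier, so a sleep interrupted by a signal resumes
 * towards the original deadline instead of being extended by the time already
 * slept.
 */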

#define PROCESS_CLOCK   make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK    make_thread_cpuclock(0, CPUCLOCK_SCHED)
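/*
 * PROCESS_CLOCK and THREAD_CLOCK name the caller's own process or thread
 * (PID component 0) with the CPUCLOCK_SCHED clock type. The wrapper functions
 * below ignore the incoming clockid and substitute these constants.
 */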
static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec64 *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec64 *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              const struct timespec64 *rqtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec64 *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec64 *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}

const struct k_clock clock_posix_cpu = {
        .clock_getres   = posix_cpu_clock_getres,
        .clock_set      = posix_cpu_clock_set,
        .clock_get      = posix_cpu_clock_get,
        .timer_create   = posix_cpu_timer_create,
        .nsleep         = posix_cpu_nsleep,
        .timer_set      = posix_cpu_timer_set,
        .timer_del      = posix_cpu_timer_del,
        .timer_get      = posix_cpu_timer_get,
        .timer_rearm    = posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
        .clock_getres   = process_cpu_clock_getres,
        .clock_get      = process_cpu_clock_get,
        .timer_create   = process_cpu_timer_create,
        .nsleep         = process_cpu_nsleep,
};

const struct k_clock clock_thread = {
        .clock_getres   = thread_cpu_clock_getres,
        .clock_get      = thread_cpu_clock_get,
        .timer_create   = thread_cpu_timer_create,
};
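/*
 * clock_posix_cpu is the k_clock used for dynamically encoded per-process and
 * per-thread CPU clockids (e.g. those returned by clock_getcpuclockid() or
 * pthread_getcpuclockid()), while clock_process and clock_thread back the
 * static CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID clocks. Note
 * that clock_thread provides no nsleep method: CLOCK_THREAD_CPUTIME_ID always
 * refers to the calling thread, and posix_cpu_nsleep() rejects sleeping on
 * one's own thread clock because it could never make progress.
 */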