// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
{
	posix_cputimers_init(pct);
	if (cpu_limit != RLIM_INFINITY)
		pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
}

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
 * necessary. Needs siglock protection since other code may update the
 * expiration cache as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

/*
 * Functions for validating access to tasks.
 */
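/*
 * Resolve the task targeted by a CPU clock. A zero PID selects the current
 * thread (per-thread clocks) or the current group leader (process clocks).
 * For any other PID the target must be in the caller's thread group (thread
 * clocks) or be a thread group leader (process clocks); otherwise NULL is
 * returned.
 */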
static struct task_struct *lookup_task(const pid_t pid, bool thread)
{
	struct task_struct *p;

	if (!pid)
		return thread ? current : current->group_leader;

	p = find_task_by_vpid(pid);
	if (!p || p == current)
		return p;
	if (thread)
		return same_thread_group(p, current) ? p : NULL;
	if (p == current)
		return p;
	return has_group_leader_pid(p) ? p : NULL;
}

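/*
 * The clockid_t encodes the target PID (CPUCLOCK_PID), the clock type
 * (CPUCLOCK_WHICH) and whether the clock is per-thread or process-wide
 * (CPUCLOCK_PERTHREAD). Validate the clock type, resolve the task and
 * optionally take a reference on it.
 */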
static struct task_struct *__get_task_for_clock(const clockid_t clock,
						bool getref)
{
	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
	const pid_t pid = CPUCLOCK_PID(clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
		return NULL;

	rcu_read_lock();
	p = lookup_task(pid, thread);
	if (p && getref)
		get_task_struct(p);
	rcu_read_unlock();
	return p;
}

static inline struct task_struct *get_task_for_clock(const clockid_t clock)
{
	return __get_task_for_clock(clock, true);
}

static inline int validate_clock_permissions(const clockid_t clock)
{
	return __get_task_for_clock(clock, false) ? 0 : -EINVAL;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	int i;
	u64 delta, incr;

	if (!timer->it_interval)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it_interval;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

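	/*
	 * Walk the doubled increment back down: every power-of-two multiple
	 * of the interval that still fits into delta advances the expiry by
	 * that amount and accounts for 2^i overruns.
	 */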
	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
}

/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
{
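	/* ~x is non-zero unless x == U64_MAX, so the OR is zero only if all three caches hold U64_MAX. */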
	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = validate_clock_permissions(which_clock);

	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
{
	int error = validate_clock_permissions(clock);

	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	return error ? : -EPERM;
}

/*
 * Sample a per-thread clock for the given task. clkid is validated.
 */
static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
{
	u64 utime, stime;

	if (clkid == CPUCLOCK_SCHED)
		return task_sched_runtime(p);

	task_cputime(p, &utime, &stime);

	switch (clkid) {
	case CPUCLOCK_PROF:
		return utime + stime;
	case CPUCLOCK_VIRT:
		return utime;
	default:
		WARN_ON_ONCE(1);
	}
	return 0;
}

static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
{
	samples[CPUCLOCK_PROF] = stime + utime;
	samples[CPUCLOCK_VIRT] = utime;
	samples[CPUCLOCK_SCHED] = rtime;
}

static void task_sample_cputime(struct task_struct *p, u64 *samples)
{
	u64 stime, utime;

	task_cputime(p, &utime, &stime);
	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
}

static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
				       u64 *samples)
{
	u64 stime, utime, rtime;

	utime = atomic64_read(&at->utime);
	stime = atomic64_read(&at->stime);
	rtime = atomic64_read(&at->sum_exec_runtime);
	store_samples(samples, stime, utime, rtime);
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
			      struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/**
 * thread_group_sample_cputime - Sample cputime for a given task
 * @tsk:	Task for which cputime needs to be sampled
 * @samples:	Storage for time samples
 *
 * Called from sys_getitimer() to calculate the expiry time of an active
 * timer. That means group cputime accounting is already active. Called
 * with task sighand lock held.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	WARN_ON_ONCE(!cputimer->running);

	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

/**
 * thread_group_start_cputime - Start cputime and return a sample
 * @tsk:	Task for which cputime needs to be started
 * @samples:	Storage for time samples
 *
 * The thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		struct task_cputime sum;

		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
{
	struct task_cputime ct;

	thread_group_cputime(tsk, &ct);
	store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
}

/*
 * Sample a process (thread group) clock for the given task clkid. If the
 * group's cputime accounting is already enabled, read the atomic
 * store. Otherwise a full update is required.  Task's sighand lock must be
 * held to protect the task traversal on a full update. clkid is already
 * validated.
 */
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
				  bool start)
{
	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
	u64 samples[CPUCLOCK_MAX];

	if (!READ_ONCE(cputimer->running)) {
		if (start)
			thread_group_start_cputime(p, samples);
		else
			__thread_group_cputime(p, samples);
	} else {
		proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
	}

	return samples[clkid];
}

static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
{
	const clockid_t clkid = CPUCLOCK_WHICH(clock);
	struct task_struct *tsk;
	u64 t;

	tsk = get_task_for_clock(clock);
	if (!tsk)
		return -EINVAL;

	if (CPUCLOCK_PERTHREAD(clock))
		t = cpu_clock_sample(clkid, tsk);
	else
		t = cpu_clock_sample_group(clkid, tsk, false);
	put_task_struct(tsk);

	*tp = ns_to_timespec64(t);
	return 0;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	struct task_struct *p = get_task_for_clock(new_timer->it_clock);

	if (!p)
		return -EINVAL;

	new_timer->kclock = &clock_posix_cpu;
	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
	new_timer->it.cpu.task = p;
	return 0;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	if (WARN_ON_ONCE(!p))
		return -EINVAL;

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers which are still armed when a thread exits. The
 * timers are only removed from the list. No other updates are done. The
 * corresponding posix timers are still accessible, but cannot be rearmed.
 *
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct posix_cputimers *pct)
{
	cleanup_timers_list(&pct->bases[CPUCLOCK_PROF].cpu_timers);
	cleanup_timers_list(&pct->bases[CPUCLOCK_VIRT].cpu_timers);
	cleanup_timers_list(&pct->bases[CPUCLOCK_SCHED].cpu_timers);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(&tsk->posix_cputimers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(&tsk->signal->posix_cputimers);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct cpu_timer_list *const nt = &timer->it.cpu;
	int clkidx = CPUCLOCK_WHICH(timer->it_clock);
	struct task_struct *p = timer->it.cpu.task;
	u64 newexp = timer->it.cpu.expires;
	struct posix_cputimer_base *base;
	struct list_head *head, *listpos;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		base = p->posix_cputimers.bases + clkidx;
	else
		base = p->signal->posix_cputimers.bases + clkidx;

	listpos = head = &base->cpu_timers;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos != head)
		return;

	/*
	 * We are the new earliest-expiring POSIX 1.b timer, hence
	 * need to update expiration cache. Take into account that
	 * for process timers we share expiration cache with itimers
	 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
	 */
	if (newexp < base->nextevt)
		base->nextevt = newexp;

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
	else
		tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * User doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (!timer->it_interval) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	u64 old_expires, new_expires, old_incr, val;
	struct task_struct *p = timer->it.cpu.task;
	struct sighand_struct *sighand;
	unsigned long flags;
	int ret;

	if (WARN_ON_ONCE(!p))
		return -EINVAL;

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */

	ret = 0;
	old_incr = timer->it_interval;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		val = cpu_clock_sample(clkid, p);
	else
		val = cpu_clock_sample_group(clkid, p, true);

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it_interval = timespec64_to_ktime(new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct task_struct *p = timer->it.cpu.task;
	u64 now;

	if (WARN_ON_ONCE(!p))
		return;

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ktime_to_timespec64(timer->it_interval);

	if (!timer->it.cpu.expires)
		return;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		now = cpu_clock_sample(clkid, p);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else {
			now = cpu_clock_sample_group(clkid, p, false);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

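/*
 * Scan a timer list and move expired entries onto the firing list. At most
 * 20 timers are moved per invocation to bound the work done here; the
 * expiry time of the first timer that has not fired (or U64_MAX if the
 * list was drained) is returned as the new expiry cache value.
 */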
static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return U64_MAX;
}

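/*
 * For each clock, move the expired timers of the corresponding base onto
 * @firing and store the earliest remaining expiry in the base's nextevt
 * cache.
 */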
static void collect_posix_cputimers(struct posix_cputimers *pct,
				    u64 *samples, struct list_head *firing)
{
	struct posix_cputimer_base *base = pct->bases;
	int i;

	for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
		base->nextevt = check_timers_list(&base->cpu_timers, firing,
						  samples[i]);
	}
}

static inline void check_dl_overrun(struct task_struct *tsk)
{
	if (tsk->dl.dl_overrun) {
		tsk->dl.dl_overrun = 0;
		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
	}
}

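/*
 * Compare an accounted time sample against a resource limit. If the limit
 * has been reached, optionally log a watchdog message and send @signo to
 * the current task. Returns true when the limit was hit.
 */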
static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{
	if (time < limit)
		return false;

	if (print_fatal_signals) {
		pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
			rt ? "RT" : "CPU", hard ? "hard" : "soft",
			current->comm, task_pid_nr(current));
	}
	__group_send_sig_info(signo, SEND_SIG_PRIV, current);
	return true;
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct posix_cputimers *pct = &tsk->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	if (expiry_cache_is_inactive(pct))
		return;

	task_sample_cputime(tsk, samples);
	collect_posix_cputimers(pct, samples, firing);

	/*
	 * Check for the special case thread timers.
	 */
	soft = task_rlimit(tsk, RLIMIT_RTTIME);
	if (soft != RLIM_INFINITY) {
		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

		/* At the hard limit, send SIGKILL. No further action. */
		if (hard != RLIM_INFINITY &&
		    check_rlimit(rttime, hard, SIGKILL, true, true))
			return;

		/* At the soft limit, send a SIGXCPU every second */
		if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
			soft += USEC_PER_SEC;
			tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
		}
	}

	if (expiry_cache_is_inactive(pct))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running. This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

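/*
 * Check one of the classic process-wide itimers (ITIMER_PROF/ITIMER_VIRTUAL)
 * against the given cputime sample. If it expired, send the signal and
 * either rearm it by its increment or clear it; a still armed expiry is
 * pulled into the caller's expiration cache.
 */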
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    task_tgid(tsk), cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && it->expires < *expires)
		*expires = it->expires;
}

/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->posix_cputimers lists onto the firing list.
 * Per-thread timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	struct posix_cputimers *pct = &sig->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];
	unsigned long soft;

	/*
	 * If cputimer is not running, then there are no active
	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(sig->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals. Group accounting is active
	 * so the sample can be taken directly.
	 */
	proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
	collect_posix_cputimers(pct, samples, firing);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
			 &pct->bases[CPUCLOCK_PROF].nextevt,
			 samples[CPUCLOCK_PROF], SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
			 &pct->bases[CPUCLOCK_VIRT].nextevt,
			 samples[CPUCLOCK_VIRT], SIGVTALRM);

	soft = task_rlimit(tsk, RLIMIT_CPU);
	if (soft != RLIM_INFINITY) {
		/* RLIMIT_CPU is in seconds. Samples are nanoseconds */
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
		u64 ptime = samples[CPUCLOCK_PROF];
		u64 softns = (u64)soft * NSEC_PER_SEC;
		u64 hardns = (u64)hard * NSEC_PER_SEC;

		/* At the hard limit, send SIGKILL. No further action. */
		if (hard != RLIM_INFINITY &&
		    check_rlimit(ptime, hardns, SIGKILL, false, true))
			return;

		/* At the soft limit, send a SIGXCPU every second */
		if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
			sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
			softns += NSEC_PER_SEC;
		}

		/* Update the expiry cache */
		if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
			pct->bases[CPUCLOCK_PROF].nextevt = softns;
	}

	if (expiry_cache_is_inactive(pct))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct task_struct *p = timer->it.cpu.task;
	struct sighand_struct *sighand;
	unsigned long flags;
	u64 now;

	if (WARN_ON_ONCE(!p))
		return;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		now = cpu_clock_sample(clkid, p);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			return;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			return;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/* If the process is dying, no need to rearm */
			goto unlock;
		}
		now = cpu_clock_sample_group(clkid, p, true);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer);
unlock:
	unlock_task_sighand(p, &flags);
}

/**
 * task_cputimers_expired - Check whether posix CPU timers are expired
 *
 * @samples:	Array of current samples for the CPUCLOCK clocks
 * @pct:	Pointer to a posix_cputimers container
 *
 * Returns true if any member of @samples is greater than or equal to the
 * corresponding member of @pct->bases[CLK].nextevt. False otherwise
 */
static inline bool
task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
{
	int i;

	for (i = 0; i < CPUCLOCK_MAX; i++) {
		if (samples[i] >= pct->bases[i].nextevt)
Thomas Gleixner001f7972019-08-21 21:09:13 +02001013 return true;
1014 }
1015 return false;
Frank Mayharf06febc2008-09-12 09:54:39 -07001016}
1017
1018/**
1019 * fastpath_timer_check - POSIX CPU timers fast path.
1020 *
1021 * @tsk: The task (thread) being checked.
Frank Mayharf06febc2008-09-12 09:54:39 -07001022 *
Frank Mayharbb34d922008-09-12 09:54:39 -07001023 * Check the task and thread group timers. If both are zero (there are no
1024 * timers set) return false. Otherwise snapshot the task and thread group
1025 * timers and compare them with the corresponding expiration times. Return
1026 * true if a timer has expired, else return false.
Frank Mayharf06febc2008-09-12 09:54:39 -07001027 */
Thomas Gleixner001f7972019-08-21 21:09:13 +02001028static inline bool fastpath_timer_check(struct task_struct *tsk)
Frank Mayharf06febc2008-09-12 09:54:39 -07001029{
Oleg Nesterovad133ba2008-11-17 15:39:47 +01001030 struct signal_struct *sig;
Frank Mayharf06febc2008-09-12 09:54:39 -07001031
Thomas Gleixner2bbdbda2019-08-21 21:09:19 +02001032 if (!expiry_cache_is_inactive(&tsk->posix_cputimers)) {
Thomas Gleixner001f7972019-08-21 21:09:13 +02001033 u64 samples[CPUCLOCK_MAX];
Frank Mayharbb34d922008-09-12 09:54:39 -07001034
Thomas Gleixner001f7972019-08-21 21:09:13 +02001035 task_sample_cputime(tsk, samples);
Thomas Gleixner87dc6442019-08-26 20:22:24 +02001036 if (task_cputimers_expired(samples, &tsk->posix_cputimers))
Thomas Gleixner001f7972019-08-21 21:09:13 +02001037 return true;
Frank Mayharbb34d922008-09-12 09:54:39 -07001038 }
Oleg Nesterovad133ba2008-11-17 15:39:47 +01001039
1040 sig = tsk->signal;
Jason Lowc8d75aa2015-10-14 12:07:56 -07001041 /*
1042 * Check if thread group timers expired when the cputimer is
1043 * running and no other thread in the group is already checking
1044 * for thread group cputimers. These fields are read without the
1045 * sighand lock. However, this is fine because this is meant to
1046 * be a fastpath heuristic to determine whether we should try to
1047 * acquire the sighand lock to check/handle timers.
1048 *
1049 * In the worst case scenario, if 'running' or 'checking_timer' gets
1050 * set but the current thread doesn't see the change yet, we'll wait
1051 * until the next thread in the group gets a scheduler interrupt to
1052 * handle the timer. This isn't an issue in practice because these
1053 * types of delays with signals actually getting sent are expected.
1054 */
1055 if (READ_ONCE(sig->cputimer.running) &&
1056 !READ_ONCE(sig->cputimer.checking_timer)) {
Thomas Gleixner001f7972019-08-21 21:09:13 +02001057 u64 samples[CPUCLOCK_MAX];
Frank Mayharbb34d922008-09-12 09:54:39 -07001058
Thomas Gleixner001f7972019-08-21 21:09:13 +02001059 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
1060 samples);
Oleg Nesterov8d1f4312010-06-11 20:04:46 +02001061
Thomas Gleixner87dc6442019-08-26 20:22:24 +02001062 if (task_cputimers_expired(samples, &sig->posix_cputimers))
Thomas Gleixner001f7972019-08-21 21:09:13 +02001063 return true;
Frank Mayharbb34d922008-09-12 09:54:39 -07001064 }
Oleg Nesterov37bebc72009-03-23 20:34:11 +01001065
Juri Lelli34be3932017-12-12 12:10:24 +01001066 if (dl_task(tsk) && tsk->dl.dl_overrun)
Thomas Gleixner001f7972019-08-21 21:09:13 +02001067 return true;
Juri Lelli34be3932017-12-12 12:10:24 +01001068
Thomas Gleixner001f7972019-08-21 21:09:13 +02001069 return false;
Frank Mayharf06febc2008-09-12 09:54:39 -07001070}
1071
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072/*
1073 * This is called from the timer interrupt handler. The irq handler has
1074 * already updated our counts. We need to check if any timers fire now.
1075 * Interrupts are disabled.
1076 */
Thomas Gleixnerdce3e8f2019-08-19 16:31:47 +02001077void run_posix_cpu_timers(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078{
Thomas Gleixnerdce3e8f2019-08-19 16:31:47 +02001079 struct task_struct *tsk = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080 struct k_itimer *timer, *next;
Oleg Nesterov0bdd2ed2010-06-11 01:10:18 +02001081 unsigned long flags;
Thomas Gleixnerdce3e8f2019-08-19 16:31:47 +02001082 LIST_HEAD(firing);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083
Frederic Weisbeckera6968222017-11-06 16:01:28 +01001084 lockdep_assert_irqs_disabled();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085
Linus Torvalds1da177e2005-04-16 15:20:36 -07001086 /*
Frank Mayharf06febc2008-09-12 09:54:39 -07001087 * The fast path checks that there are no expired thread or thread
Frank Mayharbb34d922008-09-12 09:54:39 -07001088 * group timers. If that's so, just return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 */
Frank Mayharbb34d922008-09-12 09:54:39 -07001090 if (!fastpath_timer_check(tsk))
Frank Mayharf06febc2008-09-12 09:54:39 -07001091 return;
Ingo Molnar5ce73a42008-09-14 17:11:46 +02001092
Oleg Nesterov0bdd2ed2010-06-11 01:10:18 +02001093 if (!lock_task_sighand(tsk, &flags))
1094 return;
Frank Mayharbb34d922008-09-12 09:54:39 -07001095 /*
1096 * Here we take off tsk->signal->cpu_timers[N] and
1097 * tsk->cpu_timers[N] all the timers that are firing, and
1098 * put them on the firing list.
1099 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list. We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us. We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun. So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
			   u64 *newval, u64 *oldval)
{
	u64 now, *nextevt;

	if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
		return;

	nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
	now = cpu_clock_sample_group(clkid, tsk, true);

	if (oldval) {
		/*
		 * We are setting the itimer. The *oldval value is absolute
		 * and we update it to be relative; the *newval argument is
		 * relative and we update it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update the expiration cache if this is the earliest timer. The
	 * CPUCLOCK_PROF expiry cache is also used by RLIMIT_CPU.
	 */
	if (*newval < *nextevt)
		*nextevt = *newval;

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
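
/*
 * Illustrative sketch only (not from this file): a typical caller, such as
 * the ITIMER_PROF/ITIMER_VIRTUAL update path, is assumed to look roughly
 * like the following, with both values expressed in nanoseconds and
 * sighand->siglock held across the call as required above:
 *
 *	u64 nval = ...;		new relative expiry, 0 disarms the timer
 *	u64 oval = ...;		previous absolute expiry
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	set_process_cpu_timer(tsk, CPUCLOCK_PROF, &nval, &oval);
 *	spin_unlock_irq(&tsk->sighand->siglock);
 *
 * On return *oval has been converted to a relative value and *nval to an
 * absolute one, as described in the comment inside the function.
 */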

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct itimerspec64 it;
	struct k_itimer timer;
	u64 expires;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec64 zero_it;
		struct restart_block *restart;

		memset(&it, 0, sizeof(it));
		it.it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset; the
				 * deletion below cannot fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		expires = timer.it.cpu.expires;
		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		if (!error) {
			/*
			 * The timer is now unarmed; deletion cannot fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case where the timer was or
			 * is in the middle of firing. In all other cases we
			 * have already freed the resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
		/*
		 * Report back to the user the time still remaining.
		 */
		restart = &current->restart_block;
		restart->nanosleep.expires = expires;
		if (restart->nanosleep.type != TT_NONE)
			error = nanosleep_copyout(restart, &it.it_value);
	}

	return error;
}
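
/*
 * Illustrative only (userspace view, not part of this file): this path is
 * reached from e.g.
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, NULL);
 *
 * which blocks until the calling process has consumed roughly one more
 * second of CPU time, or a signal interrupts the sleep.
 */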

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct restart_block *restart_block = &current->restart_block;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp);

	if (error == -ERESTART_RESTARTBLOCK) {

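		/*
		 * An absolute sleep needs no restart block: restarting the
		 * syscall with the same, still absolute, expiry is enough.
		 * Only relative sleeps resume via the restart block set up
		 * below.
		 */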
		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
	}
	return error;
}

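/*
 * The saved expiry is absolute, so the restart sleeps with TIMER_ABSTIME:
 * a sleep that is repeatedly interrupted by signals still completes at the
 * originally computed CPU time and does not accumulate drift.
 */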
static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec64 t;

	t = ns_to_timespec64(restart_block->nanosleep.expires);

	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}

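/*
 * Clock ids for the default process- and thread-wide CPU clocks used by the
 * wrappers below: pid/tid 0 selects the caller itself, and CPUCLOCK_SCHED
 * selects the scheduler runtime based clock.
 */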
#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

const struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
	.timer_rearm	= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
	.clock_getres	= process_cpu_clock_getres,
	.clock_get	= process_cpu_clock_get,
	.timer_create	= process_cpu_timer_create,
	.nsleep		= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
	.clock_getres	= thread_cpu_clock_getres,
	.clock_get	= thread_cpu_clock_get,
	.timer_create	= thread_cpu_timer_create,
};
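
/*
 * Illustrative note: clock_posix_cpu backs the dynamically encoded
 * per-process/per-thread clockids (e.g. the ones returned by
 * clock_getcpuclockid()), while clock_process and clock_thread are the
 * backends for CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID.
 * Example userspace use (sketch only):
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * which reports the CPU time consumed by the calling thread via
 * thread_cpu_clock_get() above.
 */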