/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	cputime_t cputime = secs_to_cputime(rlim_new);

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

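/*
 * A sketch of the expected caller (illustrative, not a quote of
 * kernel/sys.c): the setrlimit()/prlimit() path should invoke
 * update_rlimit_cpu() whenever a finite RLIMIT_CPU is installed, so
 * that a lowered limit takes effect immediately:
 *
 *	if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY)
 *		update_rlimit_cpu(tsk, new_rlim.rlim_cur);
 */
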
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}

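/*
 * The clockid validated above packs a PID and a clock type into one
 * value.  Assuming the usual encoding from <linux/posix-timers.h>
 * (glibc's MAKE_PROCESS_CPUCLOCK is the userspace mirror), a caller
 * builds a per-process profiling clock for a given pid roughly as:
 *
 *	clockid_t clk = (~(clockid_t) pid << 3) | CPUCLOCK_PROF;
 *
 * with bit 2 (CPUCLOCK_PERTHREAD_MASK) set instead for a per-thread
 * clock, and pid == 0 meaning the calling task itself.
 */
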
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	union cpu_time_count ret;
	ret.sched = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret.cpu = timespec_to_cputime(tp);
	}
	return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
			       union cpu_time_count cpu,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
		*tp = ns_to_timespec(cpu.sched);
	else
		cputime_to_timespec(cpu.cpu, tp);
}

static inline int cpu_time_before(const clockid_t which_clock,
				  union cpu_time_count now,
				  union cpu_time_count then)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		return now.sched < then.sched;
	} else {
		return now.cpu < then.cpu;
	}
}
static inline void cpu_time_add(const clockid_t which_clock,
				union cpu_time_count *acc,
				union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	} else {
		acc->cpu += val.cpu;
	}
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu -= b.cpu;
	}
	return a;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
			   union cpu_time_count now)
{
	int i;

	if (timer->it.cpu.incr.sched == 0)
		return;

	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		unsigned long long delta, incr;

		if (now.sched < timer->it.cpu.expires.sched)
			return;
		incr = timer->it.cpu.incr.sched;
		delta = now.sched + incr - timer->it.cpu.expires.sched;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr << 1;
		for (; i >= 0; incr >>= 1, i--) {
			if (delta < incr)
				continue;
			timer->it.cpu.expires.sched += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	} else {
		cputime_t delta, incr;

		if (now.cpu < timer->it.cpu.expires.cpu)
			return;
		incr = timer->it.cpu.incr.cpu;
		delta = now.cpu + incr - timer->it.cpu.expires.cpu;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr += incr;
		for (; i >= 0; incr = incr >> 1, i--) {
			if (delta < incr)
				continue;
			timer->it.cpu.expires.cpu += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	}
}

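/*
 * A worked example of the doubling/halving scheme above: with
 * expires == 100, incr == 10 and now == 135, delta starts at 45.
 * The first loop doubles incr to 40 (i == 2); the second loop
 * subtracts 40 from delta (advancing expires to 140 and adding
 * 1 << 2 == 4 overruns), then skips the 20 and 10 steps because only
 * 5 remains.  Four missed periods are thus accounted in O(log(delta))
 * iterations rather than one subtraction per period, and expires ends
 * up at 140 > now.
 */
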
static inline cputime_t prof_ticks(struct task_struct *p)
{
	return p->utime + p->stime;
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
	return p->utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

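/*
 * For example, with HZ == 1000 the reported resolution is exactly
 * 1000000 ns (one jiffy); the round-up above only matters for HZ
 * values that do not divide NSEC_PER_SEC evenly, e.g. HZ == 300.
 */
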
static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    union cpu_time_count *cpu)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = task_sched_runtime(p);
		break;
	}
	return 0;
}

static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
	if (b->utime > a->utime)
		a->utime = b->utime;

	if (b->stime > a->stime)
		a->stime = b->stime;

	if (b->sum_exec_runtime > a->sum_exec_runtime)
		a->sum_exec_runtime = b->sum_exec_runtime;
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;
	unsigned long flags;

	if (!cputimer->running) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start
		 * it.
		 */
		thread_group_cputime(tsk, &sum);
		raw_spin_lock_irqsave(&cputimer->lock, flags);
		cputimer->running = 1;
		update_gt_cputime(&cputimer->cputime, &sum);
	} else
		raw_spin_lock_irqsave(&cputimer->lock, flags);
	*times = cputimer->cputime;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		cpu->sched = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}


static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	union cpu_time_count rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (same_thread_group(p, current)) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else {
				read_lock(&tasklist_lock);
				if (thread_group_leader(p) && p->sighand) {
					error =
					    cpu_clock_sample_group(which_clock,
								   p, &rtn);
				}
				read_unlock(&tasklist_lock);
			}
		}
		rcu_read_unlock();
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}


/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}

	return ret;
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sum_exec_runtime)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = utime + stime;

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.cpu < ptime) {
			timer->expires.cpu = 0;
		} else {
			timer->expires.cpu -= ptime;
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.cpu < utime) {
			timer->expires.cpu = 0;
		} else {
			timer->expires.cpu -= utime;
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.sched < sum_exec_runtime) {
			timer->expires.sched = 0;
		} else {
			timer->expires.sched -= sum_exec_runtime;
		}
	}
}

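/*
 * Example of the residual semantics above: if a profiling timer was
 * armed to expire at 5s of CPU time and the exiting thread has
 * consumed 2s (utime + stime), the detached entry is left with
 * expires.cpu == 3s, so a later timer_gettime() on the orphaned timer
 * still reports the unexpired remainder.
 */
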
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	add_device_randomness((const void *) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));
	cleanup_timers(tsk->cpu_timers,
		       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	struct signal_struct *const sig = tsk->signal;

	cleanup_timers(tsk->signal->cpu_timers,
		       tsk->utime + sig->utime, tsk->stime + sig->stime,
		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
					     timer->it.cpu.expires,
					     now);
}

static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
	return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, interrupts disabled and p->sighand->siglock taken.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		union cpu_time_count *exp = &nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp->cpu))
				cputime_expires->prof_exp = exp->cpu;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp->cpu))
				cputime_expires->virt_exp = exp->cpu;
			break;
		case CPUCLOCK_SCHED:
			if (cputime_expires->sched_exp == 0 ||
			    cputime_expires->sched_exp > exp->sched)
				cputime_expires->sched_exp = exp->sched;
			break;
		}
	}
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires.sched = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires.sched = 0;
	} else if (timer->it.cpu.incr.sched == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires.sched = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			       struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count old_expires, new_expires, old_incr, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->sighand.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->sighand == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value to an absolute expiry (and to convert the old value
	 * from absolute back to relative).  To set a process timer,
	 * we need a sample to balance the thread expiry times (in
	 * arm_timer).  With an absolute time, we must check if it's
	 * already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires.sched == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (cpu_time_before(timer->it_clock, val,
					    timer->it.cpu.expires)) {
				old_expires = cpu_time_sub(
					timer->it_clock,
					timer->it.cpu.expires, val);
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		spin_unlock(&p->sighand->siglock);
		read_unlock(&tasklist_lock);
		goto out;
	}

	if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
		cpu_time_add(timer->it_clock, &new_expires, val);
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires.sched != 0 &&
	    cpu_time_before(timer->it_clock, val, new_expires)) {
		arm_timer(timer);
	}

	spin_unlock(&p->sighand->siglock);
	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires.sched != 0 &&
	    !cpu_time_before(timer->it_clock, val, new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   old_incr, &old->it_interval);
	}
	return ret;
}

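/*
 * From userspace the path above is reached via timer_settime() on a
 * CPU-time clock.  A minimal sketch of such a caller (illustrative
 * only, error handling omitted):
 *
 *	struct sigevent ev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGALRM,
 *	};
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 2 },	(first fire: 2s of CPU)
 *		.it_interval = { .tv_sec = 1 },	(reload: every CPU second)
 *	};
 *	timer_t tid;
 *
 *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &ev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 *
 * With flags == 0 (no TIMER_ABSTIME) the 2s is relative, which is why
 * the code above adds the current sample (val) to new_expires before
 * arming.
 */
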
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	union cpu_time_count now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
	dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
		sample_to_timespec(timer->it_clock,
				   cpu_time_sub(timer->it_clock,
						timer->it.cpu.expires, now),
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	unsigned long soft;

	maxfire = 20;
	tsk->cputime_expires.prof_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) {
			tsk->cputime_expires.prof_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.virt_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) {
			tsk->cputime_expires.virt_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.sched_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
			tsk->cputime_expires.sched_exp = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case thread timers.
	 */
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			printk(KERN_INFO
			       "RT Watchdog Timeout: %s[%d]\n",
			       tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
}

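/*
 * For example, with RLIMIT_RTTIME { rlim_cur = 2000000, rlim_max =
 * 5000000 } (values are in microseconds), a realtime thread that never
 * blocks gets SIGXCPU once its rt.timeout crosses 2s of unblocked
 * runtime, then roughly every further second as the soft limit is
 * bumped by USEC_PER_SEC, and finally SIGKILL when it crosses the 5s
 * hard limit.
 */
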
static void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cputimer->lock, flags);
	cputimer->running = 0;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     cputime_t *expires, cputime_t cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr) {
			it->expires += it->incr;
			it->error += it->incr_error;
			if (it->error >= onecputick) {
				it->expires -= cputime_one_jiffy;
				it->error -= onecputick;
			}
		} else {
			it->expires = 0;
		}

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires)) {
		*expires = it->expires;
	}
}

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] lists onto the firing list.
 * Per-thread timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;
	maxfire = 20;
	prof_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || ptime < tl->expires.cpu) {
			prof_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || utime < tl->expires.cpu) {
			virt_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
			sched_expires = tl->expires.sched;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		cputime_t x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = secs_to_cputime(soft);
		if (!prof_expires || x < prof_expires) {
			prof_expires = x;
		}
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);
}

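/*
 * For example, with RLIMIT_CPU { rlim_cur = 10, rlim_max = 15 }
 * (seconds): at 10s of group CPU time the process gets SIGXCPU and
 * rlim_cur is bumped to 11, so the warning repeats every further CPU
 * second until SIGKILL at the 15s hard limit; prof_expires is clamped
 * to the (possibly bumped) soft limit so the expiry cache triggers the
 * next check on schedule.
 */
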
1124/*
1125 * This is called from the signal code (via do_schedule_next_timer)
1126 * when the last timer signal was delivered and we have to reload the timer.
1127 */
1128void posix_cpu_timer_schedule(struct k_itimer *timer)
1129{
1130 struct task_struct *p = timer->it.cpu.task;
1131 union cpu_time_count now;
1132
1133 if (unlikely(p == NULL))
1134 /*
1135 * The task was cleaned up already, no future firings.
1136 */
Roland McGrath708f430d2005-10-30 15:03:13 -08001137 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138
1139 /*
1140 * Fetch the current sample and update the timer's expiry time.
1141 */
1142 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
1143 cpu_clock_sample(timer->it_clock, p, &now);
1144 bump_cpu_timer(timer, now);
1145 if (unlikely(p->exit_state)) {
1146 clear_dead_task(timer, now);
Roland McGrath708f430d2005-10-30 15:03:13 -08001147 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 }
1149 read_lock(&tasklist_lock); /* arm_timer needs it. */
Stanislaw Gruszkac2873932010-03-11 14:04:42 -08001150 spin_lock(&p->sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151 } else {
1152 read_lock(&tasklist_lock);
Oleg Nesterovd30fda32010-05-26 14:43:13 -07001153 if (unlikely(p->sighand == NULL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 /*
1155 * The process has been reaped.
1156 * We can't even collect a sample any more.
1157 */
1158 put_task_struct(p);
1159 timer->it.cpu.task = p = NULL;
1160 timer->it.cpu.expires.sched = 0;
Roland McGrath708f430d2005-10-30 15:03:13 -08001161 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
1163 /*
1164 * We've noticed that the thread is dead, but
1165 * not yet reaped. Take this opportunity to
1166 * drop our task ref.
1167 */
1168 clear_dead_task(timer, now);
Roland McGrath708f430d2005-10-30 15:03:13 -08001169 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170 }
Stanislaw Gruszkac2873932010-03-11 14:04:42 -08001171 spin_lock(&p->sighand->siglock);
Peter Zijlstra3997ad32009-02-12 15:00:52 +01001172 cpu_timer_sample_group(timer->it_clock, p, &now);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 bump_cpu_timer(timer, now);
1174 /* Leave the tasklist_lock locked for the call below. */
1175 }
1176
1177 /*
1178 * Now re-arm for the new expiry time.
1179 */
Stanislaw Gruszkac2873932010-03-11 14:04:42 -08001180 BUG_ON(!irqs_disabled());
Stanislaw Gruszka5eb9aa62010-03-11 14:04:38 -08001181 arm_timer(timer);
Stanislaw Gruszkac2873932010-03-11 14:04:42 -08001182 spin_unlock(&p->sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183
Roland McGrath708f430d2005-10-30 15:03:13 -08001184out_unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 read_unlock(&tasklist_lock);
Roland McGrath708f430d2005-10-30 15:03:13 -08001186
1187out:
1188 timer->it_overrun_last = timer->it_overrun;
1189 timer->it_overrun = -1;
1190 ++timer->it_requeue_pending;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191}
1192
Frank Mayharf06febc2008-09-12 09:54:39 -07001193/**
Frank Mayharf06febc2008-09-12 09:54:39 -07001194 * task_cputime_expired - Compare two task_cputime entities.
1195 *
1196 * @sample: The task_cputime structure to be checked for expiration.
1197 * @expires: Expiration times, against which @sample will be checked.
1198 *
1199 * Checks @sample against @expires to see if any field of @sample has expired.
1200 * Returns true if any field of the former is greater than the corresponding
1201 * field of the latter if the latter field is set. Otherwise returns false.
1202 */
1203static inline int task_cputime_expired(const struct task_cputime *sample,
1204 const struct task_cputime *expires)
1205{
Martin Schwidefsky64861632011-12-15 14:56:09 +01001206 if (expires->utime && sample->utime >= expires->utime)
Frank Mayharf06febc2008-09-12 09:54:39 -07001207 return 1;
Martin Schwidefsky64861632011-12-15 14:56:09 +01001208 if (expires->stime && sample->utime + sample->stime >= expires->stime)
Frank Mayharf06febc2008-09-12 09:54:39 -07001209 return 1;
1210 if (expires->sum_exec_runtime != 0 &&
1211 sample->sum_exec_runtime >= expires->sum_exec_runtime)
1212 return 1;
1213 return 0;
1214}
1215
1216/**
1217 * fastpath_timer_check - POSIX CPU timers fast path.
1218 *
1219 * @tsk: The task (thread) being checked.
Frank Mayharf06febc2008-09-12 09:54:39 -07001220 *
Frank Mayharbb34d922008-09-12 09:54:39 -07001221 * Check the task and thread group timers. If both are zero (there are no
1222 * timers set) return false. Otherwise snapshot the task and thread group
1223 * timers and compare them with the corresponding expiration times. Return
1224 * true if a timer has expired, else return false.
Frank Mayharf06febc2008-09-12 09:54:39 -07001225 */
Frank Mayharbb34d922008-09-12 09:54:39 -07001226static inline int fastpath_timer_check(struct task_struct *tsk)
Frank Mayharf06febc2008-09-12 09:54:39 -07001227{
Oleg Nesterovad133ba2008-11-17 15:39:47 +01001228 struct signal_struct *sig;
Frank Mayharf06febc2008-09-12 09:54:39 -07001229
Frank Mayharbb34d922008-09-12 09:54:39 -07001230 if (!task_cputime_zero(&tsk->cputime_expires)) {
1231 struct task_cputime task_sample = {
1232 .utime = tsk->utime,
1233 .stime = tsk->stime,
1234 .sum_exec_runtime = tsk->se.sum_exec_runtime
1235 };
1236
1237 if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
1238 return 1;
1239 }
Oleg Nesterovad133ba2008-11-17 15:39:47 +01001240
1241 sig = tsk->signal;
Stanislaw Gruszka29f87b72010-04-27 14:12:15 -07001242 if (sig->cputimer.running) {
Frank Mayharbb34d922008-09-12 09:54:39 -07001243 struct task_cputime group_sample;
1244
Thomas Gleixneree30a7b2009-07-25 18:56:56 +02001245 raw_spin_lock(&sig->cputimer.lock);
Oleg Nesterov8d1f4312010-06-11 20:04:46 +02001246 group_sample = sig->cputimer.cputime;
Thomas Gleixneree30a7b2009-07-25 18:56:56 +02001247 raw_spin_unlock(&sig->cputimer.lock);
Oleg Nesterov8d1f4312010-06-11 20:04:46 +02001248
Frank Mayharbb34d922008-09-12 09:54:39 -07001249 if (task_cputime_expired(&group_sample, &sig->cputime_expires))
1250 return 1;
1251 }
Oleg Nesterov37bebc72009-03-23 20:34:11 +01001252
Stanislaw Gruszkaf55db602010-03-11 14:04:37 -08001253 return 0;
Frank Mayharf06febc2008-09-12 09:54:39 -07001254}
1255
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256/*
1257 * This is called from the timer interrupt handler. The irq handler has
1258 * already updated our counts. We need to check if any timers fire now.
1259 * Interrupts are disabled.
1260 */
1261void run_posix_cpu_timers(struct task_struct *tsk)
1262{
1263 LIST_HEAD(firing);
1264 struct k_itimer *timer, *next;
Oleg Nesterov0bdd2ed2010-06-11 01:10:18 +02001265 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266
1267 BUG_ON(!irqs_disabled());
1268
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 /*
Frank Mayharf06febc2008-09-12 09:54:39 -07001270 * The fast path checks that there are no expired thread or thread
Frank Mayharbb34d922008-09-12 09:54:39 -07001271 * group timers. If that's so, just return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272 */
Frank Mayharbb34d922008-09-12 09:54:39 -07001273 if (!fastpath_timer_check(tsk))
Frank Mayharf06febc2008-09-12 09:54:39 -07001274 return;
Ingo Molnar5ce73a42008-09-14 17:11:46 +02001275
Oleg Nesterov0bdd2ed2010-06-11 01:10:18 +02001276 if (!lock_task_sighand(tsk, &flags))
1277 return;
Frank Mayharbb34d922008-09-12 09:54:39 -07001278 /*
1279 * Here we take off tsk->signal->cpu_timers[N] and
1280 * tsk->cpu_timers[N] all the timers that are firing, and
1281 * put them on the firing list.
1282 */
1283 check_thread_timers(tsk, &firing);
Stanislaw Gruszka29f87b72010-04-27 14:12:15 -07001284 /*
1285 * If there are any active process wide timers (POSIX 1.b, itimers,
1286 * RLIMIT_CPU) cputimer must be running.
1287 */
1288 if (tsk->signal->cputimer.running)
1289 check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}
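
/*
 * Illustrative only (userspace, not kernel code): the timers drained above
 * are the ones armed through the POSIX timer API on a CPU-time clock,
 * roughly:
 *
 *	timer_t tid;
 *	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *				.sigev_signo  = SIGPROF };
 *	struct itimerspec its = { .it_value = { .tv_sec = 2 } };
 *
 *	timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 *
 * Once the thread has consumed two seconds of CPU time, the tick path ends
 * up here and cpu_timer_fire() queues the signal.
 */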

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	union cpu_time_count now;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting itimer.  The *oldval is absolute and we
		 * update it to be relative; the *newval argument is relative
		 * and we update it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now.cpu) {
				/* Just about to fire. */
				*oldval = cputime_one_jiffy;
			} else {
				*oldval -= now.cpu;
			}
		}

		if (!*newval)
			return;
		*newval += now.cpu;
	}

	/*
	 * Update the expiration cache if this is the earliest timer, or if
	 * the RLIMIT_CPU limit expires earlier than the cached prof_exp
	 * timer.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}
}
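
/*
 * Illustrative only (userspace, not kernel code): this is the path behind
 * setitimer() on a CPU clock; ITIMER_PROF maps to CPUCLOCK_PROF and
 * ITIMER_VIRTUAL to CPUCLOCK_VIRT, e.g.
 *
 *	struct itimerval itv = { .it_value = { .tv_sec = 1 } };
 *	setitimer(ITIMER_PROF, &itv, NULL);
 *
 * arms a one-second profiling timer, whose expiry ends up cached in
 * signal->cputime_expires.prof_exp via the switch above.
 */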

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires.sched == 0) {
				/*
				 * Our timer fired and was reset.
				 */
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		posix_cpu_timer_set(&timer, 0, &zero_it, it);
		spin_unlock_irq(&timer.it_lock);

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
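
/*
 * Note on the handshake above: for these nanosleep-style timers (no sigqueue
 * allocated), cpu_timer_fire() wakes the sleeping task and zeroes
 * it.cpu.expires, which is why the wait loop treats expires.sched == 0 as
 * "the timer fired".
 */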
1438
Thomas Gleixnerbc2c8ea2011-02-01 13:52:12 +00001439static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1440
1441static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1442 struct timespec *rqtp, struct timespec __user *rmtp)
Toyo Abee4b76552006-09-29 02:00:29 -07001443{
1444 struct restart_block *restart_block =
Thomas Gleixner3751f9f2011-02-01 13:51:20 +00001445 &current_thread_info()->restart_block;
Toyo Abee4b76552006-09-29 02:00:29 -07001446 struct itimerspec it;
1447 int error;
1448
1449 /*
1450 * Diagnose required errors first.
1451 */
1452 if (CPUCLOCK_PERTHREAD(which_clock) &&
1453 (CPUCLOCK_PID(which_clock) == 0 ||
1454 CPUCLOCK_PID(which_clock) == current->pid))
1455 return -EINVAL;
1456
1457 error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
1458
1459 if (error == -ERESTART_RESTARTBLOCK) {
1460
Thomas Gleixner3751f9f2011-02-01 13:51:20 +00001461 if (flags & TIMER_ABSTIME)
Toyo Abee4b76552006-09-29 02:00:29 -07001462 return -ERESTARTNOHAND;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 /*
Thomas Gleixner3751f9f2011-02-01 13:51:20 +00001464 * Report back to the user the time still remaining.
1465 */
1466 if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 return -EFAULT;
1468
Toyo Abe1711ef32006-09-29 02:00:28 -07001469 restart_block->fn = posix_cpu_nsleep_restart;
Thomas Gleixnerab8177b2011-05-20 13:05:15 +02001470 restart_block->nanosleep.clockid = which_clock;
Thomas Gleixner3751f9f2011-02-01 13:51:20 +00001471 restart_block->nanosleep.rmtp = rmtp;
1472 restart_block->nanosleep.expires = timespec_to_ns(rqtp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 return error;
1475}
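
/*
 * Illustrative only (userspace, not kernel code): this is the backend of
 * clock_nanosleep() on a CPU-time clock, e.g.
 *
 *	struct timespec req = { .tv_sec = 5 }, rem;
 *	int err = clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, &rem);
 *
 * sleeps until the process has consumed five seconds of CPU time; if a
 * signal interrupts the sleep, the time still remaining is reported through
 * rem via the copy_to_user() above.
 */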
1476
Thomas Gleixnerbc2c8ea2011-02-01 13:52:12 +00001477static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478{
Thomas Gleixnerab8177b2011-05-20 13:05:15 +02001479 clockid_t which_clock = restart_block->nanosleep.clockid;
Thomas Gleixner97735f22006-01-09 20:52:37 -08001480 struct timespec t;
Toyo Abee4b76552006-09-29 02:00:29 -07001481 struct itimerspec it;
1482 int error;
Thomas Gleixner97735f22006-01-09 20:52:37 -08001483
Thomas Gleixner3751f9f2011-02-01 13:51:20 +00001484 t = ns_to_timespec(restart_block->nanosleep.expires);
Thomas Gleixner97735f22006-01-09 20:52:37 -08001485
Toyo Abee4b76552006-09-29 02:00:29 -07001486 error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
1487
1488 if (error == -ERESTART_RESTARTBLOCK) {
Thomas Gleixner3751f9f2011-02-01 13:51:20 +00001489 struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
Toyo Abee4b76552006-09-29 02:00:29 -07001490 /*
Thomas Gleixner3751f9f2011-02-01 13:51:20 +00001491 * Report back to the user the time still remaining.
1492 */
1493 if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
Toyo Abee4b76552006-09-29 02:00:29 -07001494 return -EFAULT;
1495
Thomas Gleixner3751f9f2011-02-01 13:51:20 +00001496 restart_block->nanosleep.expires = timespec_to_ns(&t);
Toyo Abee4b76552006-09-29 02:00:29 -07001497 }
1498 return error;
1499
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500}
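
/*
 * Note: by the time posix_cpu_nsleep() stores it, nanosleep.expires already
 * holds the absolute expiry (do_cpu_nanosleep() rewrote *rqtp from the
 * timer), so the restart above can always sleep with TIMER_ABSTIME and the
 * wakeup does not drift across restarts.
 */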

#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
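
/*
 * For reference: a CPU clockid packs the target pid into the upper bits
 * (bitwise-negated and shifted left by 3), the per-thread flag into bit 2,
 * and the clock type (PROF/VIRT/SCHED) into bits 0-1; see
 * MAKE_PROCESS_CPUCLOCK() in <linux/posix-timers.h>.  Pid 0 means "the
 * calling task", and the negation makes these dynamic clockids negative,
 * distinguishing them from the static CLOCK_* constants.
 */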

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.nsleep_restart	= posix_cpu_nsleep_restart,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
};
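
/*
 * Illustrative only (userspace, not kernel code): clock_posix_cpu backs the
 * dynamic (negative) clockids that the posix-timers core dispatches here,
 * e.g. the ones obtained via
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *	clock_getcpuclockid(getpid(), &cid);
 *	clock_gettime(cid, &ts);
 *
 * which reads the calling (or another) process's accumulated CPU time.
 */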

static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres	= process_cpu_clock_getres,
		.clock_get	= process_cpu_clock_get,
		.timer_create	= process_cpu_timer_create,
		.nsleep		= process_cpu_nsleep,
		.nsleep_restart	= process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres	= thread_cpu_clock_getres,
		.clock_get	= thread_cpu_clock_get,
		.timer_create	= thread_cpu_timer_create,
	};
	struct timespec ts;

	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	cputime_to_timespec(cputime_one_jiffy, &ts);
	onecputick = ts.tv_nsec;
	WARN_ON(ts.tv_sec != 0);

	return 0;
}
__initcall(init_posix_cpu_timers);