/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>

/*
 * Called after updating RLIMIT_CPU to set timer expiration if necessary.
 */
void update_rlimit_cpu(unsigned long rlim_new)
{
	cputime_t cputime;

	cputime = secs_to_cputime(rlim_new);
	if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
	    cputime_gt(current->signal->it_prof_expires, cputime)) {
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
	}
}

static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	read_lock(&tasklist_lock);
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : thread_group_leader(p))) {
		error = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return error;
}

static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	union cpu_time_count ret;
	ret.sched = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret.cpu = timespec_to_cputime(tp);
	}
	return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
			       union cpu_time_count cpu,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
		*tp = ns_to_timespec(cpu.sched);
	else
		cputime_to_timespec(cpu.cpu, tp);
}
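
/*
 * Note (editorial): union cpu_time_count holds either a nanosecond value in
 * .sched (used by CPUCLOCK_SCHED clocks) or a cputime_t in .cpu (used by
 * CPUCLOCK_PROF and CPUCLOCK_VIRT).  The helpers below select the right
 * member from the clockid so callers can compare, add and subtract samples
 * without caring which representation is in use.
 */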
static inline int cpu_time_before(const clockid_t which_clock,
				  union cpu_time_count now,
				  union cpu_time_count then)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		return now.sched < then.sched;
	} else {
		return cputime_lt(now.cpu, then.cpu);
	}
}
static inline void cpu_time_add(const clockid_t which_clock,
				union cpu_time_count *acc,
				union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	} else {
		acc->cpu = cputime_add(acc->cpu, val.cpu);
	}
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu = cputime_sub(a.cpu, b.cpu);
	}
	return a;
}

/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
	cputime_t res = cputime_div(time, div);

	return max_t(cputime_t, res, 1);
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
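/*
 * Editorial illustration (not part of the original source): the doubling
 * and halving loops below charge all missed periods in a logarithmic number
 * of steps.  For example, with expires = 10, incr = 3 and now = 25 in
 * .sched units: delta = 18, incr is doubled to 12 (i = 2), then 12 and 6
 * are subtracted on the way back down, so it_overrun grows by 6 and the new
 * expiry becomes 28, the first period boundary strictly after "now".
 */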
static void bump_cpu_timer(struct k_itimer *timer,
			   union cpu_time_count now)
{
	int i;

	if (timer->it.cpu.incr.sched == 0)
		return;

	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		unsigned long long delta, incr;

		if (now.sched < timer->it.cpu.expires.sched)
			return;
		incr = timer->it.cpu.incr.sched;
		delta = now.sched + incr - timer->it.cpu.expires.sched;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr << 1;
		for (; i >= 0; incr >>= 1, i--) {
			if (delta < incr)
				continue;
			timer->it.cpu.expires.sched += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	} else {
		cputime_t delta, incr;

		if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
			return;
		incr = timer->it.cpu.incr.cpu;
		delta = cputime_sub(cputime_add(now.cpu, incr),
				    timer->it.cpu.expires.cpu);
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
			incr = cputime_add(incr, incr);
		for (; i >= 0; incr = cputime_halve(incr), i--) {
			if (cputime_lt(delta, incr))
				continue;
			timer->it.cpu.expires.cpu =
				cputime_add(timer->it.cpu.expires.cpu, incr);
			timer->it_overrun += 1 << i;
			delta = cputime_sub(delta, incr);
		}
	}
}

static inline cputime_t prof_ticks(struct task_struct *p)
{
	return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
	return p->utime;
}

int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    union cpu_time_count *cpu)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = task_sched_runtime(p);
		break;
	}
	return 0;
}

void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct sighand_struct *sighand;
	struct signal_struct *sig;
	struct task_struct *t;

	*times = INIT_CPUTIME;

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	if (!sighand)
		goto out;

	sig = tsk->signal;

	t = tsk;
	do {
		times->utime = cputime_add(times->utime, t->utime);
		times->stime = cputime_add(times->stime, t->stime);
		times->sum_exec_runtime += t->se.sum_exec_runtime;

		t = next_thread(t);
	} while (t != tsk);

	times->utime = cputime_add(times->utime, sig->utime);
	times->stime = cputime_add(times->stime, sig->stime);
	times->sum_exec_runtime += sig->sum_sched_runtime;
out:
	rcu_read_unlock();
}
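
/*
 * Note (editorial, inferred from the caller below): when the group cputimer
 * is restarted, its cached totals may lag behind a freshly summed
 * thread_group_cputime().  Taking the field-wise maximum keeps the cached
 * clock from ever appearing to run backwards.
 */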
static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
	if (cputime_gt(b->utime, a->utime))
		a->utime = b->utime;

	if (cputime_gt(b->stime, a->stime))
		a->stime = b->stime;

	if (b->sum_exec_runtime > a->sum_exec_runtime)
		a->sum_exec_runtime = b->sum_exec_runtime;
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;
	unsigned long flags;

	spin_lock_irqsave(&cputimer->lock, flags);
	if (!cputimer->running) {
		cputimer->running = 1;
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start
		 * it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime, &sum);
	}
	*times = cputimer->cputime;
	spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = thread_group_sched_runtime(p);
		break;
	}
	return 0;
}


int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	union cpu_time_count rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (same_thread_group(p, current)) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else {
				read_lock(&tasklist_lock);
				if (thread_group_leader(p) && p->signal) {
					error =
					    cpu_clock_sample_group(which_clock,
								   p, &rtn);
				}
				read_unlock(&tasklist_lock);
			}
		}
		rcu_read_unlock();
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}


/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
	new_timer->it.cpu.incr.sched = 0;
	new_timer->it.cpu.expires.sched = 0;

	read_lock(&tasklist_lock);
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !thread_group_leader(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}

	return ret;
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
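/*
 * Note (editorial): "head" points at an array of three lists, one per clock
 * (profiling time first, then virtual time, then sched time, judging by the
 * values each loop compares against); the "++head" steps below simply move
 * on to the next clock's list.
 */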
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sum_exec_runtime)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = cputime_add(utime, stime);

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, ptime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 ptime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, utime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 utime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.sched < sum_exec_runtime) {
			timer->expires.sched = 0;
		} else {
			timer->expires.sched -= sum_exec_runtime;
		}
	}
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers,
		       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);

}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	struct signal_struct *const sig = tsk->signal;

	cleanup_timers(tsk->signal->cpu_timers,
		       cputime_add(tsk->utime, sig->utime),
		       cputime_add(tsk->stime, sig->stime),
		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
					     timer->it.cpu.expires,
					     now);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
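/*
 * Note (editorial): the insert keeps each list sorted by expiry; in
 * addition, when the new timer becomes the earliest entry on its list, the
 * cached expiration fields (p->cputime_expires for thread timers,
 * p->signal->cputime_expires for process timers) are pulled forward so the
 * tick-time checks notice the new deadline.
 */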
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;
	unsigned long i;

	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
		p->cpu_timers : p->signal->cpu_timers);
	head += CPUCLOCK_WHICH(timer->it_clock);

	BUG_ON(!irqs_disabled());
	spin_lock(&p->sighand->siglock);

	listpos = head;
	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		list_for_each_entry(next, head, entry) {
			if (next->expires.sched > nt->expires.sched)
				break;
			listpos = &next->entry;
		}
	} else {
		list_for_each_entry(next, head, entry) {
			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
				break;
			listpos = &next->entry;
		}
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		/*
		 * We are the new earliest-expiring timer.
		 * If we are a thread timer, there can always
		 * be a process timer telling us to stop earlier.
		 */

		if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_PROF:
				if (cputime_eq(p->cputime_expires.prof_exp,
					       cputime_zero) ||
				    cputime_gt(p->cputime_expires.prof_exp,
					       nt->expires.cpu))
					p->cputime_expires.prof_exp =
						nt->expires.cpu;
				break;
			case CPUCLOCK_VIRT:
				if (cputime_eq(p->cputime_expires.virt_exp,
					       cputime_zero) ||
				    cputime_gt(p->cputime_expires.virt_exp,
					       nt->expires.cpu))
					p->cputime_expires.virt_exp =
						nt->expires.cpu;
				break;
			case CPUCLOCK_SCHED:
				if (p->cputime_expires.sched_exp == 0 ||
				    p->cputime_expires.sched_exp >
							nt->expires.sched)
					p->cputime_expires.sched_exp =
						nt->expires.sched;
				break;
			}
		} else {
			/*
			 * For a process timer, set the cached expiration time.
			 */
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_VIRT:
				if (!cputime_eq(p->signal->it_virt_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_virt_expires,
					       timer->it.cpu.expires.cpu))
					break;
				p->signal->cputime_expires.virt_exp =
					timer->it.cpu.expires.cpu;
				break;
			case CPUCLOCK_PROF:
				if (!cputime_eq(p->signal->it_prof_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_prof_expires,
					       timer->it.cpu.expires.cpu))
					break;
				i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
				if (i != RLIM_INFINITY &&
				    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
					break;
				p->signal->cputime_expires.prof_exp =
					timer->it.cpu.expires.cpu;
				break;
			case CPUCLOCK_SCHED:
				p->signal->cputime_expires.sched_exp =
					timer->it.cpu.expires.sched;
				break;
			}
		}
	}

	spin_unlock(&p->sighand->siglock);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires.sched = 0;
	} else if (timer->it.cpu.incr.sched == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires.sched = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
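/*
 * Note (editorial): unlike cpu_clock_sample_group() above, which sums the
 * group's times afresh, this variant reads the cached thread_group_cputimer()
 * (plus task_delta_exec() for the sched clock), which also leaves the group
 * accounting marked as running while timers are armed against it.
 */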
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count old_expires, new_expires, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->signal.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->signal == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());

	ret = 0;
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);
	spin_unlock(&p->sighand->siglock);

	/*
	 * We need to sample the current value to convert the new
	 * value between relative and absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires.sched == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (cpu_time_before(timer->it_clock, val,
					    timer->it.cpu.expires)) {
				old_expires = cpu_time_sub(
					timer->it_clock,
					timer->it.cpu.expires, val);
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		read_unlock(&tasklist_lock);
		goto out;
	}

	if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
		cpu_time_add(timer->it_clock, &new_expires, val);
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    cpu_time_before(timer->it_clock, val, new_expires)) {
		arm_timer(timer, val);
	}

	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    !cpu_time_before(timer->it_clock, val, new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   timer->it.cpu.incr, &old->it_interval);
	}
	return ret;
}

void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	union cpu_time_count now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
 dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		if (timer->it.cpu.incr.sched == 0 &&
		    cpu_time_before(timer->it_clock,
				    timer->it.cpu.expires, now)) {
			/*
			 * Do-nothing timer expired and has no reload,
			 * so it's as if it was never set.
			 */
			timer->it.cpu.expires.sched = 0;
			itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
			return;
		}
		/*
		 * Account for any expirations and reloads that should
		 * have happened.
		 */
		bump_cpu_timer(timer, now);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
		sample_to_timespec(timer->it_clock,
				   cpu_time_sub(timer->it_clock,
						timer->it.cpu.expires, now),
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
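/*
 * Note (editorial): each of the three loops below walks one clock's sorted
 * list and moves at most about 20 expired entries onto "firing" per pass
 * (the "maxfire" bound), then records the earliest remaining expiry in
 * tsk->cputime_expires so the tick-time fast path knows when to look again.
 */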
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;

	maxfire = 20;
	tsk->cputime_expires.prof_exp = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
			tsk->cputime_expires.prof_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.virt_exp = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
			tsk->cputime_expires.virt_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.sched_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
			tsk->cputime_expires.sched_exp = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case thread timers.
	 */
	if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
		unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
		unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (sig->rlim[RLIMIT_RTTIME].rlim_cur
			    < sig->rlim[RLIMIT_RTTIME].rlim_max) {
				sig->rlim[RLIMIT_RTTIME].rlim_cur +=
					USEC_PER_SEC;
			}
			printk(KERN_INFO
				"RT Watchdog Timeout: %s[%d]\n",
				tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
}

static void stop_process_timers(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	unsigned long flags;

	if (!cputimer->running)
		return;

	spin_lock_irqsave(&cputimer->lock, flags);
	cputimer->running = 0;
	spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] lists onto the firing list.
 * Per-thread timers have already been taken off.
 */
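/*
 * Note (editorial): on top of the posix timer lists, this also folds the
 * ITIMER_PROF/ITIMER_VIRTUAL itimers and the RLIMIT_CPU soft/hard limits
 * into the cached group expiration times, since they are all driven by the
 * same process-wide CPU clocks.
 */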
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;

	/*
	 * Don't sample the current process CPU clocks if there are no timers.
	 */
	if (list_empty(&timers[CPUCLOCK_PROF]) &&
	    cputime_eq(sig->it_prof_expires, cputime_zero) &&
	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
	    list_empty(&timers[CPUCLOCK_VIRT]) &&
	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
	    list_empty(&timers[CPUCLOCK_SCHED])) {
		stop_process_timers(tsk);
		return;
	}

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = cputime_add(utime, cputime.stime);
	sum_sched_runtime = cputime.sum_exec_runtime;
	maxfire = 20;
	prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
			prof_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
			virt_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
			sched_expires = tl->expires.sched;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
		if (cputime_ge(ptime, sig->it_prof_expires)) {
			/* ITIMER_PROF fires and reloads. */
			sig->it_prof_expires = sig->it_prof_incr;
			if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
				sig->it_prof_expires = cputime_add(
					sig->it_prof_expires, ptime);
			}
			__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
		    (cputime_eq(prof_expires, cputime_zero) ||
		     cputime_lt(sig->it_prof_expires, prof_expires))) {
			prof_expires = sig->it_prof_expires;
		}
	}
	if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
		if (cputime_ge(utime, sig->it_virt_expires)) {
			/* ITIMER_VIRTUAL fires and reloads. */
			sig->it_virt_expires = sig->it_virt_incr;
			if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
				sig->it_virt_expires = cputime_add(
					sig->it_virt_expires, utime);
			}
			__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
		    (cputime_eq(virt_expires, cputime_zero) ||
		     cputime_lt(sig->it_virt_expires, virt_expires))) {
			virt_expires = sig->it_virt_expires;
		}
	}
	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		cputime_t x;
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (sig->rlim[RLIMIT_CPU].rlim_cur
			    < sig->rlim[RLIMIT_CPU].rlim_max) {
				sig->rlim[RLIMIT_CPU].rlim_cur++;
			}
		}
		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
		if (cputime_eq(prof_expires, cputime_zero) ||
		    cputime_lt(x, prof_expires)) {
			prof_expires = x;
		}
	}

	if (!cputime_eq(prof_expires, cputime_zero) &&
	    (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
	     cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
		sig->cputime_expires.prof_exp = prof_expires;
	if (!cputime_eq(virt_expires, cputime_zero) &&
	    (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
	     cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
		sig->cputime_expires.virt_exp = virt_expires;
	if (sched_expires != 0 &&
	    (sig->cputime_expires.sched_exp == 0 ||
	     sig->cputime_expires.sched_exp > sched_expires))
		sig->cputime_expires.sched_exp = sched_expires;
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			goto out;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it.  */
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires.sched = 0;
			goto out_unlock;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			goto out_unlock;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer, now);

out_unlock:
	read_unlock(&tasklist_lock);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (cputime_eq(cputime->utime, cputime_zero) &&
	    cputime_eq(cputime->stime, cputime_zero) &&
	    cputime->sum_exec_runtime == 0)
		return 1;
	return 0;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
					const struct task_cputime *expires)
{
	if (!cputime_eq(expires->utime, cputime_zero) &&
	    cputime_ge(sample->utime, expires->utime))
		return 1;
	if (!cputime_eq(expires->stime, cputime_zero) &&
	    cputime_ge(cputime_add(sample->utime, sample->stime),
		       expires->stime))
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
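/*
 * Note (editorial): this is the cheap per-tick check made by
 * run_posix_cpu_timers() before sighand->siglock is taken; it mostly just
 * compares the current counts against the cached cputime_expires values
 * filled in by arm_timer() and the check_*_timers() functions above.
 */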
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	/* tsk == current, ensure it is safe to use ->signal/sighand */
	if (unlikely(tsk->exit_state))
		return 0;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample = {
			.utime = tsk->utime,
			.stime = tsk->stime,
			.sum_exec_runtime = tsk->se.sum_exec_runtime
		};

		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	if (!task_cputime_zero(&sig->cputime_expires)) {
		struct task_cputime group_sample;

		thread_group_cputimer(tsk, &group_sample);
		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;

	BUG_ON(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	spin_lock(&tsk->sighand->siglock);
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);
	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	spin_unlock(&tsk->sighand->siglock);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers.
 * The tsk->sighand->siglock must be held by the caller.
 * The *newval argument is relative and we update it to be absolute, *oldval
 * is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	union cpu_time_count now;
	struct list_head *head;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		if (!cputime_eq(*oldval, cputime_zero)) {
			if (cputime_le(*oldval, now.cpu)) {
				/* Just about to fire. */
				*oldval = jiffies_to_cputime(1);
			} else {
				*oldval = cputime_sub(*oldval, now.cpu);
			}
		}

		if (cputime_eq(*newval, cputime_zero))
			return;
		*newval = cputime_add(*newval, now.cpu);

		/*
		 * If the RLIMIT_CPU timer will expire before the
		 * ITIMER_PROF timer, we have nothing else to do.
		 */
		if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
		    < cputime_to_secs(*newval))
			return;
	}

	/*
	 * Check whether there are any process timers already set to fire
	 * before this one.  If so, we don't have anything more to do.
	 */
	head = &tsk->signal->cpu_timers[clock_idx];
	if (list_empty(head) ||
	    cputime_ge(list_first_entry(head,
				  struct cpu_timer_list, entry)->expires.cpu,
		       *newval)) {
		switch (clock_idx) {
		case CPUCLOCK_PROF:
			tsk->signal->cputime_expires.prof_exp = *newval;
			break;
		case CPUCLOCK_VIRT:
			tsk->signal->cputime_expires.virt_exp = *newval;
			break;
		}
	}
}
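
/*
 * Note (editorial): clock_nanosleep() on a CPU clock is implemented with a
 * temporary, on-stack k_itimer.  Its sigq stays NULL, so when it expires
 * cpu_timer_fire() takes the special case above and simply wakes this task
 * instead of queueing a signal.
 */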
Toyo Abee4b76552006-09-29 02:00:29 -07001499static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1500 struct timespec *rqtp, struct itimerspec *it)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 struct k_itimer timer;
1503 int error;
1504
1505 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 * Set up a temporary timer and then wait for it to go off.
1507 */
1508 memset(&timer, 0, sizeof timer);
1509 spin_lock_init(&timer.it_lock);
1510 timer.it_clock = which_clock;
1511 timer.it_overrun = -1;
1512 error = posix_cpu_timer_create(&timer);
1513 timer.it_process = current;
1514 if (!error) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 static struct itimerspec zero_it;
Toyo Abee4b76552006-09-29 02:00:29 -07001516
1517 memset(it, 0, sizeof *it);
1518 it->it_value = *rqtp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519
1520 spin_lock_irq(&timer.it_lock);
Toyo Abee4b76552006-09-29 02:00:29 -07001521 error = posix_cpu_timer_set(&timer, flags, it, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 if (error) {
1523 spin_unlock_irq(&timer.it_lock);
1524 return error;
1525 }
1526
1527 while (!signal_pending(current)) {
1528 if (timer.it.cpu.expires.sched == 0) {
1529 /*
1530 * Our timer fired and was reset.
1531 */
1532 spin_unlock_irq(&timer.it_lock);
1533 return 0;
1534 }
1535
1536 /*
1537 * Block until cpu_timer_fire (or a signal) wakes us.
1538 */
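			/*
			 * Setting the task state before dropping the lock
			 * closes the race with cpu_timer_fire(): a wakeup
			 * between the unlock and schedule() just puts us
			 * back in TASK_RUNNING, so schedule() returns
			 * promptly instead of sleeping forever.
			 */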
1539 __set_current_state(TASK_INTERRUPTIBLE);
1540 spin_unlock_irq(&timer.it_lock);
1541 schedule();
1542 spin_lock_irq(&timer.it_lock);
1543 }
1544
1545 /*
1546 * We were interrupted by a signal.
1547 */
1548 sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
Toyo Abee4b76552006-09-29 02:00:29 -07001549 posix_cpu_timer_set(&timer, 0, &zero_it, it);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 spin_unlock_irq(&timer.it_lock);
1551
Toyo Abee4b76552006-09-29 02:00:29 -07001552 if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 /*
1554 * It actually did fire already.
1555 */
1556 return 0;
1557 }
1558
Toyo Abee4b76552006-09-29 02:00:29 -07001559 error = -ERESTART_RESTARTBLOCK;
1560 }
1561
1562 return error;
1563}
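
/*
 * A minimal user-space sketch (not from this file) of the path above:
 * clock_nanosleep() on CLOCK_PROCESS_CPUTIME_ID blocks until the whole
 * process has consumed the requested amount of CPU time, and when a signal
 * cuts the sleep short it reports how much CPU time was still outstanding.
 * Note that some thread has to keep running for the clock to advance at
 * all, hence the spinner thread (build with -lpthread, plus -lrt on older
 * glibc):
 *
 *	#include <errno.h>
 *	#include <pthread.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	static void *burn(void *unused)
 *	{
 *		for (;;)
 *			;			// consume CPU so the clock advances
 *	}
 *
 *	int main(void)
 *	{
 *		struct timespec req = { .tv_sec = 1 }, rem;
 *		pthread_t t;
 *		int err;
 *
 *		pthread_create(&t, NULL, burn, NULL);
 *		err = clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, &rem);
 *		if (err == EINTR)		// a signal got us first
 *			printf("remaining: %ld.%09ld\n",
 *			       (long)rem.tv_sec, rem.tv_nsec);
 *		return 0;
 *	}
 */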
1564
1565int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1566 struct timespec *rqtp, struct timespec __user *rmtp)
1567{
1568 struct restart_block *restart_block =
1569 &current_thread_info()->restart_block;
1570 struct itimerspec it;
1571 int error;
1572
1573 /*
1574 * Diagnose the required error cases first.
1575 */
1576 if (CPUCLOCK_PERTHREAD(which_clock) &&
1577 (CPUCLOCK_PID(which_clock) == 0 ||
1578 CPUCLOCK_PID(which_clock) == current->pid))
1579 return -EINVAL;
1580
1581 error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
1582
1583 if (error == -ERESTART_RESTARTBLOCK) {
1584
1585 if (flags & TIMER_ABSTIME)
1586 return -ERESTARTNOHAND;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 /*
Toyo Abee4b76552006-09-29 02:00:29 -07001588 * Report back to the user the time still remaining.
1589 */
1590 if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 return -EFAULT;
1592
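		/*
		 * Note: do_cpu_nanosleep() rewrote *rqtp to the timer's
		 * absolute expiry before returning, so the time stashed in
		 * arg2/arg3 below is absolute; the restart path resumes the
		 * sleep with TIMER_ABSTIME rather than queueing a fresh
		 * relative interval.
		 */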
Toyo Abe1711ef32006-09-29 02:00:28 -07001593 restart_block->fn = posix_cpu_nsleep_restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 restart_block->arg0 = which_clock;
Thomas Gleixner97735f22006-01-09 20:52:37 -08001595 restart_block->arg1 = (unsigned long) rmtp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 restart_block->arg2 = rqtp->tv_sec;
1597 restart_block->arg3 = rqtp->tv_nsec;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 return error;
1600}
1601
Toyo Abe1711ef32006-09-29 02:00:28 -07001602long posix_cpu_nsleep_restart(struct restart_block *restart_block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603{
1604 clockid_t which_clock = restart_block->arg0;
Thomas Gleixner97735f22006-01-09 20:52:37 -08001605 struct timespec __user *rmtp;
1606 struct timespec t;
Toyo Abee4b76552006-09-29 02:00:29 -07001607 struct itimerspec it;
1608 int error;
Thomas Gleixner97735f22006-01-09 20:52:37 -08001609
1610 rmtp = (struct timespec __user *) restart_block->arg1;
1611 t.tv_sec = restart_block->arg2;
1612 t.tv_nsec = restart_block->arg3;
1613
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 restart_block->fn = do_no_restart_syscall;
Toyo Abee4b76552006-09-29 02:00:29 -07001615 error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
1616
1617 if (error == -ERESTART_RESTARTBLOCK) {
1618 /*
1619 * Report back to the user the time still remaining.
1620 */
1621 if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
1622 return -EFAULT;
1623
1624 restart_block->fn = posix_cpu_nsleep_restart;
1625 restart_block->arg0 = which_clock;
1626 restart_block->arg1 = (unsigned long) rmtp;
1627 restart_block->arg2 = t.tv_sec;
1628 restart_block->arg3 = t.tv_nsec;
1629 }
1630 return error;
1631
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632}
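
/*
 * Rough sketch of how the restart block is consumed (generic signal-delivery
 * machinery, not in this file): when the interrupted sleep returned
 * -ERESTART_RESTARTBLOCK and no user signal handler ends up running, the
 * arch signal code re-issues the syscall as sys_restart_syscall(), which
 * calls restart_block->fn -- posix_cpu_nsleep_restart() here -- so the sleep
 * resumes at the stashed absolute expiry.  If a handler does run, user space
 * sees -EINTR instead, with the remaining time already copied out to rmtp.
 */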
1633
1634
1635#define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
1636#define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
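
/*
 * Aside on the encoding (assumed from <linux/posix-timers.h> of this era):
 * a CPU clock id packs the target pid, bit-inverted, into the upper bits,
 * a "per-thread" flag into bit 2, and the clock type (PROF, VIRT or SCHED)
 * into the low two bits.  With pid 0 meaning "the caller", the two ids
 * above name the caller's process-wide and per-thread scheduling clocks,
 * which is what CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID are
 * registered as below.
 */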
1637
Thomas Gleixnera924b042006-01-09 20:52:27 -08001638static int process_cpu_clock_getres(const clockid_t which_clock,
1639 struct timespec *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640{
1641 return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1642}
Thomas Gleixnera924b042006-01-09 20:52:27 -08001643static int process_cpu_clock_get(const clockid_t which_clock,
1644 struct timespec *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645{
1646 return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1647}
1648static int process_cpu_timer_create(struct k_itimer *timer)
1649{
1650 timer->it_clock = PROCESS_CLOCK;
1651 return posix_cpu_timer_create(timer);
1652}
Thomas Gleixnera924b042006-01-09 20:52:27 -08001653static int process_cpu_nsleep(const clockid_t which_clock, int flags,
Thomas Gleixner97735f22006-01-09 20:52:37 -08001654 struct timespec *rqtp,
1655 struct timespec __user *rmtp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656{
Thomas Gleixner97735f22006-01-09 20:52:37 -08001657 return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658}
Toyo Abe1711ef32006-09-29 02:00:28 -07001659static long process_cpu_nsleep_restart(struct restart_block *restart_block)
1660{
1661 return -EINVAL;
1662}
Thomas Gleixnera924b042006-01-09 20:52:27 -08001663static int thread_cpu_clock_getres(const clockid_t which_clock,
1664 struct timespec *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665{
1666 return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1667}
Thomas Gleixnera924b042006-01-09 20:52:27 -08001668static int thread_cpu_clock_get(const clockid_t which_clock,
1669 struct timespec *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670{
1671 return posix_cpu_clock_get(THREAD_CLOCK, tp);
1672}
1673static int thread_cpu_timer_create(struct k_itimer *timer)
1674{
1675 timer->it_clock = THREAD_CLOCK;
1676 return posix_cpu_timer_create(timer);
1677}
Thomas Gleixnera924b042006-01-09 20:52:27 -08001678static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
Thomas Gleixner97735f22006-01-09 20:52:37 -08001679 struct timespec *rqtp, struct timespec __user *rmtp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680{
1681 return -EINVAL;
1682}
Toyo Abe1711ef32006-09-29 02:00:28 -07001683static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
1684{
1685 return -EINVAL;
1686}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687
1688static __init int init_posix_cpu_timers(void)
1689{
1690 struct k_clock process = {
1691 .clock_getres = process_cpu_clock_getres,
1692 .clock_get = process_cpu_clock_get,
1693 .clock_set = do_posix_clock_nosettime,
1694 .timer_create = process_cpu_timer_create,
1695 .nsleep = process_cpu_nsleep,
Toyo Abe1711ef32006-09-29 02:00:28 -07001696 .nsleep_restart = process_cpu_nsleep_restart,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 };
1698 struct k_clock thread = {
1699 .clock_getres = thread_cpu_clock_getres,
1700 .clock_get = thread_cpu_clock_get,
1701 .clock_set = do_posix_clock_nosettime,
1702 .timer_create = thread_cpu_timer_create,
1703 .nsleep = thread_cpu_nsleep,
Toyo Abe1711ef32006-09-29 02:00:28 -07001704 .nsleep_restart = thread_cpu_nsleep_restart,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 };
1706
1707 register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
1708 register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
1709
1710 return 0;
1711}
1712__initcall(init_posix_cpu_timers);
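
/*
 * Illustrative user-space sketch (not part of this file; assumes a glibc
 * that routes POSIX timers to these syscalls, link with -lrt on older
 * glibc): once the two clocks above are registered, they can be driven
 * through the ordinary POSIX timer API.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	static void on_prof(int sig)
 *	{
 *		write(1, "2s of process CPU time elapsed\n", 31);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigevent sev = {
 *			.sigev_notify = SIGEV_SIGNAL,
 *			.sigev_signo  = SIGPROF,
 *		};
 *		struct itimerspec its = {
 *			.it_value    = { .tv_sec = 2 },	// first expiry after 2s of CPU time
 *			.it_interval = { .tv_sec = 2 },	// then every further 2s of CPU time
 *		};
 *		timer_t tid;
 *
 *		signal(SIGPROF, on_prof);
 *		if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid) == 0)
 *			timer_settime(tid, 0, &its, NULL);
 *		for (;;)
 *			;	// burn CPU so the clock, and with it the timer, advances
 *	}
 */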