#include "sched.h"

/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched/fair.c)
 */
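
/*
 * For contrast with the note above: a SCHED_IDLE task is an ordinary task
 * that has asked the fair class for minimal weight; it is not the per-CPU
 * idle task implemented here. A rough, illustrative userspace sketch
 * (error handling omitted):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	struct sched_param sp = { .sched_priority = 0 };
 *	sched_setscheduler(0, SCHED_IDLE, &sp);
 *
 * (pid 0 means the calling thread; SCHED_IDLE requires priority 0.)
 * The per-CPU idle ("swapper") task only runs when its runqueue is empty
 * and cannot be requested via sched_setscheduler().
 */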

#ifdef CONFIG_SMP
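/*
 * Each CPU's idle task is set up once at boot (init_idle()) and affined to
 * that CPU alone, so "selecting" a runqueue for it can only ever mean the
 * CPU it already lives on.
 */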
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
{
	put_prev_task(rq, prev);
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
	rq_last_tick_reset(rq);
}

static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_idle(struct rq *rq)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};
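
/*
 * The idle class sits at the very bottom of the scheduling-class hierarchy,
 * which makes pick_next_task_idle() the guaranteed last resort of the core
 * pick loop. As a simplified sketch of that loop (kernel/sched/core.c; not a
 * verbatim copy, RETRY_TASK handling and the fair-class fast path omitted):
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq, prev, cookie);
 *		if (p)
 *			return p;
 *	}
 *
 * Since pick_next_task_idle() always returns rq->idle, the loop can never
 * come up empty.
 */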