/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks, which are
 * handled in sched_fair.c)
 */

#ifdef CONFIG_SMP
static int select_task_rq_idle(struct task_struct *p, int sync)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */
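/*
 * For reference, a rough sketch (not part of this file) of how the core
 * scheduler reaches the hook above: on an SMP kernel, try_to_wake_up() in
 * sched.c asks the waking task's class which CPU it should run on,
 * approximately:
 *
 *	cpu = p->sched_class->select_task_rq(p, sync);
 *
 * Each runqueue's idle task is permanently bound to its own CPU, so
 * returning task_cpu(p) simply keeps it where it is.  (Sketch based on the
 * sched.c of this kernel generation; other versions differ.)
 */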
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync)
{
	resched_task(rq->idle);
}

static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
	/* adjust the active tasks as we might go into a long sleep */
	calc_load_account_active(rq);
	return rq->idle;
}
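/*
 * For reference, calc_load_account_active() lives in sched.c; roughly, it
 * folds this runqueue's currently active task count (running plus
 * uninterruptible) into the global load-average bookkeeping before the CPU
 * potentially goes tickless for a long stretch, so the sampled load does
 * not go stale while we sleep.  Approximately (details vary by version):
 *
 *	nr_active = this_rq->nr_running + this_rq->nr_uninterruptible;
 *	delta = nr_active - this_rq->calc_load_active;
 *	this_rq->calc_load_active = nr_active;
 *	atomic_long_add(delta, &calc_load_tasks);
 */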
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
	spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	spin_lock_irq(&rq->lock);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

#ifdef CONFIG_SMP
static unsigned long
load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return 0;
}

static int
move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	return 0;
}
#endif

static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_idle(struct rq *rq)
{
}
static void switched_to_idle(struct rq *rq, struct task_struct *p,
			     int running)
{
	/* Can this actually happen?? */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

static void prio_changed_idle(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/* This can happen for hotplug CPUs */

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current task's:
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
static const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,

	.load_balance		= load_balance_idle,
	.move_one_task		= move_one_task_idle,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,

	/* no .task_new for idle tasks */
};
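/*
 * For reference, a rough sketch (not part of this file) of how this class
 * gets used: pick_next_task() in sched.c walks the sched_class chain from
 * the highest-priority class downwards; idle_sched_class is the last link
 * (its .next is NULL), and pick_next_task_idle() always returns rq->idle,
 * so the walk below is guaranteed to terminate.  Approximately:
 *
 *	const struct sched_class *class = sched_class_highest;
 *
 *	for ( ; ; ) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *		class = class->next;
 *	}
 *
 * Names taken from the sched.c of this kernel generation; details may
 * differ in other versions.
 */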