/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched_fair.c)
 */

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p)
{
	resched_task(rq->idle);
}

/*
 * The idle task is always runnable; account the switch to idle in the
 * scheduler statistics and hand back this runqueue's idle thread:
 */
static struct task_struct *pick_next_task_idle(struct rq *rq, u64 now)
{
	schedstat_inc(rq, sched_goidle);

	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
	spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	spin_lock_irq(&rq->lock);
}

/*
 * The idle task is never kept on any run list, so there is nothing to
 * put back when it is switched away from:
 */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, u64 now)
{
}

/*
 * There is never anything in this class to pull over to another CPU,
 * so report zero moved tasks and zero moved load:
 */
static unsigned long
load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_nr_move, unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return 0;
}

/*
 * The idle task needs no per-tick timeslice or runtime accounting:
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
static struct sched_class idle_sched_class __read_mostly = {
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

	.load_balance		= load_balance_idle,

	.task_tick		= task_tick_idle,
	/* no .task_new for idle tasks */
};
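
/*
 * Illustrative sketch only - not part of this file or of the kernel's API.
 * It models the idea behind idle_sched_class: the core scheduler asks each
 * scheduling class, from highest to lowest priority, for a runnable task and
 * takes the first one offered; the idle class sits at the bottom and always
 * has the idle thread to hand back, so the walk can never come up empty.
 * All names below (toy_class, toy_pick_idle, ...) are invented for this
 * standalone example and do not correspond to kernel internals.
 */
#include <stdio.h>

struct toy_task { const char *name; };

struct toy_class {
	const struct toy_class *next;		/* next lower-priority class */
	struct toy_task *(*pick_next)(void);	/* NULL if nothing runnable */
};

static struct toy_task toy_idle_task = { "idle" };

static struct toy_task *toy_pick_idle(void) { return &toy_idle_task; } /* never fails */
static struct toy_task *toy_pick_fair(void) { return NULL; }           /* pretend: no fair tasks */

static const struct toy_class toy_idle_class = { NULL, toy_pick_idle };
static const struct toy_class toy_fair_class = { &toy_idle_class, toy_pick_fair };

int main(void)
{
	const struct toy_class *class;
	struct toy_task *p = NULL;

	/* Walk the classes in priority order; the idle class ends the walk. */
	for (class = &toy_fair_class; class; class = class->next) {
		p = class->pick_next();
		if (p)
			break;
	}
	printf("picked: %s\n", p->name);	/* prints "picked: idle" */
	return 0;
}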