/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
}

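/*
 * Add a task to the priority array: queue it at the tail of its
 * priority level and mark that level as occupied in the bitmap.
 */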
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
}

/*
 * Remove a task from the priority array, folding its runtime stats in
 * first and clearing the priority bit if its queue becomes empty:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
}

/*
 * Put the task at the end of the run list, without the overhead of a
 * dequeue followed by an enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}

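/*
 * sched_yield() for RT tasks simply moves the task to the tail of its
 * priority queue, letting other tasks of the same priority run first.
 */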
static void
yield_task_rt(struct rq *rq, struct task_struct *p)
{
	requeue_task_rt(rq, p);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

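/*
 * Pick the highest-priority runnable RT task: the first set bit in the
 * priority bitmap indexes the highest-priority non-empty queue, and we
 * take the task at its head.
 */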
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = rq->clock;

	return next;
}

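/*
 * A task is being switched out: fold the runtime it has consumed so
 * far into its stats and reset exec_start until it runs again.
 */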
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	head = array->queue + idx;
	curr = head->prev;

	p = list_entry(curr, struct task_struct, run_list);

	curr = curr->prev;

	rq->rt.rt_load_balance_idx = idx;
	rq->rt.rt_load_balance_head = head;
	rq->rt.rt_load_balance_curr = curr;

	return p;
}

static struct task_struct *load_balance_next_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = rq->rt.rt_load_balance_idx;
	head = rq->rt.rt_load_balance_head;
	curr = rq->rt.rt_load_balance_curr;

	/*
	 * If we arrived back to the head again then
	 * iterate to the next queue (if any):
	 */
	if (unlikely(head == curr)) {
		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

		if (next_idx >= MAX_RT_PRIO)
			return NULL;

		idx = next_idx;
		head = array->queue + idx;
		curr = head->prev;

		rq->rt.rt_load_balance_idx = idx;
		rq->rt.rt_load_balance_head = head;
	}

	p = list_entry(curr, struct task_struct, run_list);

	curr = curr->prev;

	rq->rt.rt_load_balance_curr = curr;

	return p;
}

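/*
 * Pull up to max_nr_move tasks (and at most max_load_move weighted
 * load) from 'busiest' onto this runqueue, walking busiest's RT
 * priority array via the iterators above. Returns the weighted load
 * that was actually moved.
 */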
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_nr_move, unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	int nr_moved;
	struct rq_iterator rt_rq_iterator;
	unsigned long load_moved;

	rt_rq_iterator.start = load_balance_start_rt;
	rt_rq_iterator.next = load_balance_next_rt;
	/*
	 * Pass the 'busiest' rq argument into the
	 * load_balance_[start|next]_rt iterators:
	 */
	rt_rq_iterator.arg = busiest;

	nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
			max_load_move, sd, idle, all_pinned, &load_moved,
			this_best_prio, &rt_rq_iterator);

	return load_moved;
}

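/*
 * Per-tick bookkeeping: decrement a SCHED_RR task's timeslice and,
 * once it is used up, refill it and round-robin the task to the tail
 * of its queue. SCHED_FIFO tasks are left alone.
 */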
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = static_prio_timeslice(p->static_prio);
	set_tsk_need_resched(p);

	/* put it at the end of the queue: */
	requeue_task_rt(rq, p);
}

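/*
 * All the scheduling-class methods for SCHED_FIFO/SCHED_RR tasks:
 */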
static struct sched_class rt_sched_class __read_mostly = {
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

	.load_balance		= load_balance_rt,

	.task_tick		= task_tick_rt,
};