/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_RT_GROUP_SCHED

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

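/*
 * Update the root-domain overload state: a runqueue is flagged as
 * overloaded once it has more than one RT task and at least one of
 * them can migrate.
 */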
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

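/*
 * Maintain the plist of tasks that can be pushed to another CPU,
 * ordered by priority.
 */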
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_add_rcu(&rt_rq->leaf_rt_rq_list,
			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_del_rcu(&rt_rq->leaf_rt_rq_list);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent out and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */

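/*
 * Called when the bandwidth period timer expires: replenish rt_time on
 * every rt_rq covered by this period and unthrottle/re-enqueue those
 * that may run again.  Returns 1 if all of them were idle.
 */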
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

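/*
 * Check whether this rt_rq has used up its runtime budget for the current
 * period, borrowing from other CPUs where possible; throttle and dequeue
 * it when it has.  Returns 1 if the rt_rq is now throttled.
 */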
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq->clock_task - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock_task;
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

#if defined CONFIG_SMP

static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);

static inline int next_prio(struct rq *rq)
{
	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);

	if (next && rt_prio(next->prio))
		return next->prio;
	else
		return MAX_RT_PRIO;
}

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (prio < prev_prio) {

		/*
		 * If the new task is higher in priority than anything on the
		 * run-queue, we know that the previous high becomes our
		 * next-highest.
		 */
		rt_rq->highest_prio.next = prev_prio;

		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

	} else if (prio == rt_rq->highest_prio.curr)
		/*
		 * If the next task is equal in priority to the highest on
		 * the run-queue, then we implicitly know that the next highest
		 * task cannot be any lower than current
		 */
		rt_rq->highest_prio.next = prio;
	else if (prio < rt_rq->highest_prio.next)
		/*
		 * Otherwise, we need to recompute next-highest
		 */
		rt_rq->highest_prio.next = next_prio(rq);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
		rt_rq->highest_prio.next = next_prio(rq);

	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (!rt_rq->rt_nr_running)
		list_add_leaf_rt_rq(rt_rq);

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
	if (!rt_rq->rt_nr_running)
		list_del_leaf_rt_rq(rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

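/*
 * Select a runqueue for a waking RT task.  If the task's CPU is already
 * running an RT task that is hard to push away (or of higher priority),
 * try to place the waking task on a CPU running something of lower
 * priority instead.
 */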
static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;
	int cpu;

	if (sd_flag != SD_BALANCE_WAKE)
		return smp_processor_id();

	cpu = task_cpu(p);
	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 */
	if (curr && unlikely(rt_task(curr)) &&
	    (curr->rt.nr_cpus_allowed < 2 ||
	     curr->prio < p->prio) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int target = find_lowest_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

	return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * There appear to be other CPUs that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !need_resched())
		check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

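/*
 * Walk the rt entity hierarchy from the top-level rt_rq down to a task
 * and return it; NULL when nothing is runnable or the rq is throttled.
 */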
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock_task;

	return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
#endif

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p;

			if (!rt_entity_is_task(rt_se))
				continue;

			p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

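/*
 * Find a CPU whose runqueue runs only lower-priority tasks than @task,
 * preferring the task's previous CPU and topologically close CPUs.
 */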
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
				return this_cpu;

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;
	return -1;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
Steven Rostedte8fa1362008-01-25 21:08:05 +01001294 /*
1295 * We had to unlock the run queue. In
1296				 * the meantime, the task could have
1297 * migrated already or had its affinity changed.
1298 * Also make sure that it wasn't scheduled on its rq.
1299 */
Gregory Haskins07b40322008-01-25 21:08:10 +01001300 if (unlikely(task_rq(task) != rq ||
Rusty Russell96f874e22008-11-25 02:35:14 +10301301 !cpumask_test_cpu(lowest_rq->cpu,
1302 &task->cpus_allowed) ||
Gregory Haskins07b40322008-01-25 21:08:10 +01001303 task_running(rq, task) ||
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001304 !task->on_rq)) {
Ingo Molnar4df64c02008-01-25 21:08:15 +01001305
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001306 raw_spin_unlock(&lowest_rq->lock);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001307 lowest_rq = NULL;
1308 break;
1309 }
1310 }
1311
1312		/* If this rq is still suitable, use it. */
Gregory Haskinse864c492008-12-29 09:39:49 -05001313 if (lowest_rq->rt.highest_prio.curr > task->prio)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001314 break;
1315
1316 /* try again */
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001317 double_unlock_balance(rq, lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001318 lowest_rq = NULL;
1319 }
1320
1321 return lowest_rq;
1322}
1323
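/*
 * Return the highest-priority task on this rq's pushable list, i.e. the
 * best candidate to be migrated to another CPU, or NULL if nothing is
 * pushable.
 */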
Gregory Haskins917b6272008-12-29 09:39:53 -05001324static struct task_struct *pick_next_pushable_task(struct rq *rq)
1325{
1326 struct task_struct *p;
1327
1328 if (!has_pushable_tasks(rq))
1329 return NULL;
1330
1331 p = plist_first_entry(&rq->rt.pushable_tasks,
1332 struct task_struct, pushable_tasks);
1333
1334 BUG_ON(rq->cpu != task_cpu(p));
1335 BUG_ON(task_current(rq, p));
1336 BUG_ON(p->rt.nr_cpus_allowed <= 1);
1337
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001338 BUG_ON(!p->on_rq);
Gregory Haskins917b6272008-12-29 09:39:53 -05001339 BUG_ON(!rt_task(p));
1340
1341 return p;
1342}
1343
Steven Rostedte8fa1362008-01-25 21:08:05 +01001344/*
1345 * If the current CPU has more than one RT task, see if the
1346 * non-running task can migrate over to a CPU that is running a task
1347 * of lesser priority.
1348 */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001349static int push_rt_task(struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001350{
1351 struct task_struct *next_task;
1352 struct rq *lowest_rq;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001353
Gregory Haskinsa22d7fc2008-01-25 21:08:12 +01001354 if (!rq->rt.overloaded)
1355 return 0;
1356
Gregory Haskins917b6272008-12-29 09:39:53 -05001357 next_task = pick_next_pushable_task(rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001358 if (!next_task)
1359 return 0;
1360
Peter Zijlstra49246272010-10-17 21:46:10 +02001361retry:
Gregory Haskins697f0a42008-01-25 21:08:09 +01001362 if (unlikely(next_task == rq->curr)) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001363 WARN_ON(1);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001364 return 0;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001365 }
Steven Rostedte8fa1362008-01-25 21:08:05 +01001366
1367 /*
1368	 * It's possible that the next_task slipped in with a
1369	 * higher priority than current. If that's the case,
1370	 * just reschedule current.
1371 */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001372 if (unlikely(next_task->prio < rq->curr->prio)) {
1373 resched_task(rq->curr);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001374 return 0;
1375 }
1376
Gregory Haskins697f0a42008-01-25 21:08:09 +01001377 /* We might release rq lock */
Steven Rostedte8fa1362008-01-25 21:08:05 +01001378 get_task_struct(next_task);
1379
1380 /* find_lock_lowest_rq locks the rq if found */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001381 lowest_rq = find_lock_lowest_rq(next_task, rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001382 if (!lowest_rq) {
1383 struct task_struct *task;
1384 /*
Gregory Haskins697f0a42008-01-25 21:08:09 +01001385		 * find_lock_lowest_rq releases rq->lock
Gregory Haskins15635132008-12-29 09:39:53 -05001386 * so it is possible that next_task has migrated.
1387 *
1388 * We need to make sure that the task is still on the same
1389 * run-queue and is also still the next task eligible for
1390 * pushing.
Steven Rostedte8fa1362008-01-25 21:08:05 +01001391 */
Gregory Haskins917b6272008-12-29 09:39:53 -05001392 task = pick_next_pushable_task(rq);
Gregory Haskins15635132008-12-29 09:39:53 -05001393 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1394 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001395 * If we get here, the task hasn't moved at all, but
Gregory Haskins15635132008-12-29 09:39:53 -05001396 * it has failed to push. We will not try again,
1397 * since the other cpus will pull from us when they
1398 * are ready.
1399 */
1400 dequeue_pushable_task(rq, next_task);
1401 goto out;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001402 }
Gregory Haskins917b6272008-12-29 09:39:53 -05001403
Gregory Haskins15635132008-12-29 09:39:53 -05001404 if (!task)
1405 /* No more tasks, just exit */
1406 goto out;
1407
Gregory Haskins917b6272008-12-29 09:39:53 -05001408 /*
Gregory Haskins15635132008-12-29 09:39:53 -05001409 * Something has shifted, try again.
Gregory Haskins917b6272008-12-29 09:39:53 -05001410 */
Gregory Haskins15635132008-12-29 09:39:53 -05001411 put_task_struct(next_task);
1412 next_task = task;
1413 goto retry;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001414 }
1415
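	/*
	 * We found and locked a runqueue running lower-priority work:
	 * migrate next_task over there and preempt whatever it is running.
	 */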
Gregory Haskins697f0a42008-01-25 21:08:09 +01001416 deactivate_task(rq, next_task, 0);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001417 set_task_cpu(next_task, lowest_rq->cpu);
1418 activate_task(lowest_rq, next_task, 0);
1419
1420 resched_task(lowest_rq->curr);
1421
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001422 double_unlock_balance(rq, lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001423
Steven Rostedte8fa1362008-01-25 21:08:05 +01001424out:
1425 put_task_struct(next_task);
1426
Gregory Haskins917b6272008-12-29 09:39:53 -05001427 return 1;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001428}
1429
Steven Rostedte8fa1362008-01-25 21:08:05 +01001430static void push_rt_tasks(struct rq *rq)
1431{
1432	/* push_rt_task will return true if it moved an RT task */
1433 while (push_rt_task(rq))
1434 ;
1435}
1436
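/*
 * Scan the runqueues in this root domain's rto_mask and pull over any RT
 * task that has a higher priority than what this_rq is about to run.
 * Returns 1 if at least one task was pulled, 0 otherwise.
 */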
Steven Rostedtf65eda42008-01-25 21:08:07 +01001437static int pull_rt_task(struct rq *this_rq)
1438{
Ingo Molnar80bf3172008-01-25 21:08:17 +01001439 int this_cpu = this_rq->cpu, ret = 0, cpu;
Gregory Haskinsa8728942008-12-29 09:39:49 -05001440 struct task_struct *p;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001441 struct rq *src_rq;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001442
Gregory Haskins637f5082008-01-25 21:08:18 +01001443 if (likely(!rt_overloaded(this_rq)))
Steven Rostedtf65eda42008-01-25 21:08:07 +01001444 return 0;
1445
Rusty Russellc6c49272008-11-25 02:35:05 +10301446 for_each_cpu(cpu, this_rq->rd->rto_mask) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001447 if (this_cpu == cpu)
1448 continue;
1449
1450 src_rq = cpu_rq(cpu);
Gregory Haskins74ab8e42008-12-29 09:39:50 -05001451
1452 /*
1453 * Don't bother taking the src_rq->lock if the next highest
1454 * task is known to be lower-priority than our current task.
1455 * This may look racy, but if this value is about to go
1456 * logically higher, the src_rq will push this task away.
1457		 * And if it's going logically lower, we do not care.
1458 */
1459 if (src_rq->rt.highest_prio.next >=
1460 this_rq->rt.highest_prio.curr)
1461 continue;
1462
Steven Rostedtf65eda42008-01-25 21:08:07 +01001463 /*
1464 * We can potentially drop this_rq's lock in
1465 * double_lock_balance, and another CPU could
Gregory Haskinsa8728942008-12-29 09:39:49 -05001466 * alter this_rq
Steven Rostedtf65eda42008-01-25 21:08:07 +01001467 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001468 double_lock_balance(this_rq, src_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01001469
1470 /*
1471 * Are there still pullable RT tasks?
1472 */
Mike Galbraith614ee1f2008-01-25 21:08:30 +01001473 if (src_rq->rt.rt_nr_running <= 1)
1474 goto skip;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001475
Steven Rostedtf65eda42008-01-25 21:08:07 +01001476 p = pick_next_highest_task_rt(src_rq, this_cpu);
1477
1478 /*
1479 * Do we have an RT task that preempts
1480 * the to-be-scheduled task?
1481 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001482 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001483 WARN_ON(p == src_rq->curr);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001484 WARN_ON(!p->on_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01001485
1486 /*
1487 * There's a chance that p is higher in priority
1488 * than what's currently running on its cpu.
1489			 * This is just that p is waking up and hasn't
1490 * had a chance to schedule. We only pull
1491 * p if it is lower in priority than the
Gregory Haskinsa8728942008-12-29 09:39:49 -05001492 * current task on the run queue
Steven Rostedtf65eda42008-01-25 21:08:07 +01001493 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001494 if (p->prio < src_rq->curr->prio)
Mike Galbraith614ee1f2008-01-25 21:08:30 +01001495 goto skip;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001496
1497 ret = 1;
1498
1499 deactivate_task(src_rq, p, 0);
1500 set_task_cpu(p, this_cpu);
1501 activate_task(this_rq, p, 0);
1502 /*
1503 * We continue with the search, just in
1504 * case there's an even higher prio task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001505 * in another runqueue. (low likelihood
Steven Rostedtf65eda42008-01-25 21:08:07 +01001506 * but possible)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001507 */
Steven Rostedtf65eda42008-01-25 21:08:07 +01001508 }
Peter Zijlstra49246272010-10-17 21:46:10 +02001509skip:
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001510 double_unlock_balance(this_rq, src_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01001511 }
1512
1513 return ret;
1514}
1515
Steven Rostedt9a897c52008-01-25 21:08:22 +01001516static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001517{
1518 /* Try to pull RT tasks here if we lower this rq's prio */
Gregory Haskinse864c492008-12-29 09:39:49 -05001519 if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001520 pull_rt_task(rq);
1521}
1522
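/*
 * Runs after a context switch completes: if this rq is overloaded with RT
 * tasks, try to push the spare ones off to CPUs running lower-priority work.
 */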
Steven Rostedt9a897c52008-01-25 21:08:22 +01001523static void post_schedule_rt(struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001524{
Gregory Haskins967fc042008-12-29 09:39:52 -05001525 push_rt_tasks(rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001526}
1527
Gregory Haskins8ae121a2008-04-23 07:13:29 -04001528/*
1529 * If we are not running and we are not going to reschedule soon, we should
1530 * try to push tasks away now
1531 */
Peter Zijlstraefbbd052009-12-16 18:04:40 +01001532static void task_woken_rt(struct rq *rq, struct task_struct *p)
Steven Rostedt4642daf2008-01-25 21:08:07 +01001533{
Steven Rostedt9a897c52008-01-25 21:08:22 +01001534 if (!task_running(rq, p) &&
Gregory Haskins8ae121a2008-04-23 07:13:29 -04001535 !test_tsk_need_resched(rq->curr) &&
Gregory Haskins917b6272008-12-29 09:39:53 -05001536 has_pushable_tasks(rq) &&
Steven Rostedtb3bc2112010-09-20 22:40:04 -04001537 p->rt.nr_cpus_allowed > 1 &&
Steven Rostedt43fa5462010-09-20 22:40:03 -04001538 rt_task(rq->curr) &&
Steven Rostedtb3bc2112010-09-20 22:40:04 -04001539 (rq->curr->rt.nr_cpus_allowed < 2 ||
1540 rq->curr->prio < p->prio))
Steven Rostedt4642daf2008-01-25 21:08:07 +01001541 push_rt_tasks(rq);
1542}
1543
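/*
 * The task's CPU affinity is changing.  Keep the rq's migratory count and
 * the pushable-task list consistent with the new mask before copying it in.
 */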
Mike Traviscd8ba7c2008-03-26 14:23:49 -07001544static void set_cpus_allowed_rt(struct task_struct *p,
Rusty Russell96f874e22008-11-25 02:35:14 +10301545 const struct cpumask *new_mask)
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001546{
Rusty Russell96f874e22008-11-25 02:35:14 +10301547 int weight = cpumask_weight(new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001548
1549 BUG_ON(!rt_task(p));
1550
1551 /*
1552 * Update the migration status of the RQ if we have an RT task
1553	 * which is on a runqueue AND changing its allowed-CPU weight.
1554 */
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001555 if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001556 struct rq *rq = task_rq(p);
1557
Gregory Haskins917b6272008-12-29 09:39:53 -05001558 if (!task_current(rq, p)) {
1559 /*
1560 * Make sure we dequeue this task from the pushable list
1561 * before going further. It will either remain off of
1562 * the list because we are no longer pushable, or it
1563 * will be requeued.
1564 */
1565 if (p->rt.nr_cpus_allowed > 1)
1566 dequeue_pushable_task(rq, p);
1567
1568 /*
1569 * Requeue if our weight is changing and still > 1
1570 */
1571 if (weight > 1)
1572 enqueue_pushable_task(rq, p);
1573
1574 }
1575
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001576 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001577 rq->rt.rt_nr_migratory++;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001578 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001579 BUG_ON(!rq->rt.rt_nr_migratory);
1580 rq->rt.rt_nr_migratory--;
1581 }
1582
Gregory Haskins398a1532009-01-14 09:10:04 -05001583 update_rt_migration(&rq->rt);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001584 }
1585
Rusty Russell96f874e22008-11-25 02:35:14 +10301586 cpumask_copy(&p->cpus_allowed, new_mask);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001587 p->rt.nr_cpus_allowed = weight;
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001588}
Ingo Molnardeeeccd2008-01-25 21:08:15 +01001589
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001590/* Assumes rq->lock is held */
Gregory Haskins1f11eb6a2008-06-04 15:04:05 -04001591static void rq_online_rt(struct rq *rq)
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001592{
1593 if (rq->rt.overloaded)
1594 rt_set_overload(rq);
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001595
Peter Zijlstra7def2be2008-06-05 14:49:58 +02001596 __enable_runtime(rq);
1597
Gregory Haskinse864c492008-12-29 09:39:49 -05001598 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001599}
1600
1601/* Assumes rq->lock is held */
Gregory Haskins1f11eb6a2008-06-04 15:04:05 -04001602static void rq_offline_rt(struct rq *rq)
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001603{
1604 if (rq->rt.overloaded)
1605 rt_clear_overload(rq);
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001606
Peter Zijlstra7def2be2008-06-05 14:49:58 +02001607 __disable_runtime(rq);
1608
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001609 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001610}
Steven Rostedtcb469842008-01-25 21:08:22 +01001611
1612/*
1613 * When we switch from the rt queue, we bring ourselves to a position
1614 * where we might want to pull RT tasks from other runqueues.
1615 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01001616static void switched_from_rt(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01001617{
1618 /*
1619 * If there are other RT tasks then we will reschedule
1620 * and the scheduling of the other RT tasks will handle
1621 * the balancing. But if we are the last RT task
1622 * we may need to handle the pulling of RT tasks
1623 * now.
1624 */
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001625 if (p->on_rq && !rq->rt.rt_nr_running)
Steven Rostedtcb469842008-01-25 21:08:22 +01001626 pull_rt_task(rq);
1627}
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10301628
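/*
 * Allocate the per-cpu scratch cpumask used when searching for the
 * lowest-priority runqueue to push a task to.
 */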
1629static inline void init_sched_rt_class(void)
1630{
1631 unsigned int i;
1632
1633 for_each_possible_cpu(i)
Yinghai Lueaa95842009-06-06 14:51:36 -07001634 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
Mike Travis6ca09df2008-12-31 18:08:45 -08001635 GFP_KERNEL, cpu_to_node(i));
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10301636}
Steven Rostedte8fa1362008-01-25 21:08:05 +01001637#endif /* CONFIG_SMP */
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001638
Steven Rostedtcb469842008-01-25 21:08:22 +01001639/*
1640 * When switching a task to RT, we may overload the runqueue
1641 * with RT tasks. In this case we try to push them off to
1642 * other runqueues.
1643 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01001644static void switched_to_rt(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01001645{
1646 int check_resched = 1;
1647
1648 /*
1649 * If we are already running, then there's nothing
1650 * that needs to be done. But if we are not running
1651 * we may need to preempt the current running task.
1652 * If that current running task is also an RT task
1653 * then see if we can move to another run queue.
1654 */
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001655 if (p->on_rq && rq->curr != p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01001656#ifdef CONFIG_SMP
1657 if (rq->rt.overloaded && push_rt_task(rq) &&
1658 /* Don't resched if we changed runqueues */
1659 rq != task_rq(p))
1660 check_resched = 0;
1661#endif /* CONFIG_SMP */
1662 if (check_resched && p->prio < rq->curr->prio)
1663 resched_task(rq->curr);
1664 }
1665}
1666
1667/*
1668 * Priority of the task has changed. This may cause
1669 * us to initiate a push or pull.
1670 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01001671static void
1672prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01001673{
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001674 if (!p->on_rq)
Peter Zijlstrada7a7352011-01-17 17:03:27 +01001675 return;
1676
1677 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01001678#ifdef CONFIG_SMP
1679 /*
1680 * If our priority decreases while running, we
1681 * may need to pull tasks to this runqueue.
1682 */
1683 if (oldprio < p->prio)
1684 pull_rt_task(rq);
1685 /*
1686 * If there's a higher priority task waiting to run
Steven Rostedt6fa46fa2008-03-05 10:00:12 -05001687 * then reschedule. Note, the above pull_rt_task
1688 * can release the rq lock and p could migrate.
1689 * Only reschedule if p is still on the same runqueue.
Steven Rostedtcb469842008-01-25 21:08:22 +01001690 */
Gregory Haskinse864c492008-12-29 09:39:49 -05001691 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
Steven Rostedtcb469842008-01-25 21:08:22 +01001692 resched_task(p);
1693#else
1694 /* For UP simply resched on drop of prio */
1695 if (oldprio < p->prio)
1696 resched_task(p);
1697#endif /* CONFIG_SMP */
1698 } else {
1699 /*
1700		 * This task is not running, but if its priority
1701		 * is higher than that of the currently running task,
1702		 * then reschedule.
1703 */
1704 if (p->prio < rq->curr->prio)
1705 resched_task(rq->curr);
1706 }
1707}
1708
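/*
 * RLIMIT_RTTIME enforcement: count the ticks this task has spent running
 * as an RT task and, once the limit is reached, arm the cputime expiry so
 * the posix cpu-timer code can act on it.
 */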
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001709static void watchdog(struct rq *rq, struct task_struct *p)
1710{
1711 unsigned long soft, hard;
1712
Jiri Slaby78d7d402010-03-05 13:42:54 -08001713	/* max may change after cur was read; this will be fixed next tick */
1714 soft = task_rlimit(p, RLIMIT_RTTIME);
1715 hard = task_rlimit_max(p, RLIMIT_RTTIME);
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001716
1717 if (soft != RLIM_INFINITY) {
1718 unsigned long next;
1719
1720 p->rt.timeout++;
1721 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
Peter Zijlstra5a52dd52008-01-25 21:08:32 +01001722 if (p->rt.timeout > next)
Frank Mayharf06febc2008-09-12 09:54:39 -07001723 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001724 }
1725}
Steven Rostedtcb469842008-01-25 21:08:22 +01001726
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001727static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001728{
Peter Zijlstra67e2be02007-12-20 15:01:17 +01001729 update_curr_rt(rq);
1730
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001731 watchdog(rq, p);
1732
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001733 /*
1734 * RR tasks need a special form of timeslice management.
1735 * FIFO tasks have no timeslices.
1736 */
1737 if (p->policy != SCHED_RR)
1738 return;
1739
Peter Zijlstrafa717062008-01-25 21:08:27 +01001740 if (--p->rt.time_slice)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001741 return;
1742
Peter Zijlstrafa717062008-01-25 21:08:27 +01001743 p->rt.time_slice = DEF_TIMESLICE;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001744
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02001745 /*
1746 * Requeue to the end of queue if we are not the only element
1747 * on the queue:
1748 */
Peter Zijlstrafa717062008-01-25 21:08:27 +01001749 if (p->rt.run_list.prev != p->rt.run_list.next) {
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001750 requeue_task_rt(rq, p, 0);
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02001751 set_tsk_need_resched(p);
1752 }
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001753}
1754
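/*
 * The current task is being re-established as this class's running task
 * (e.g. after a priority or policy change): reset its exec-time accounting
 * and make sure it is off the pushable list.
 */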
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001755static void set_curr_task_rt(struct rq *rq)
1756{
1757 struct task_struct *p = rq->curr;
1758
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001759 p->se.exec_start = rq->clock_task;
Gregory Haskins917b6272008-12-29 09:39:53 -05001760
1761 /* The running task is never eligible for pushing */
1762 dequeue_pushable_task(rq, p);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001763}
1764
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07001765static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00001766{
1767 /*
1768 * Time slice is 0 for SCHED_FIFO tasks
1769 */
1770 if (task->policy == SCHED_RR)
1771 return DEF_TIMESLICE;
1772 else
1773 return 0;
1774}
1775
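/*
 * The method table the core scheduler uses to drive rt tasks; the
 * balancing callbacks below are only wired up on SMP builds.
 */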
Harvey Harrison2abdad02008-04-25 10:53:13 -07001776static const struct sched_class rt_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02001777 .next = &fair_sched_class,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001778 .enqueue_task = enqueue_task_rt,
1779 .dequeue_task = dequeue_task_rt,
1780 .yield_task = yield_task_rt,
1781
1782 .check_preempt_curr = check_preempt_curr_rt,
1783
1784 .pick_next_task = pick_next_task_rt,
1785 .put_prev_task = put_prev_task_rt,
1786
Peter Williams681f3e62007-10-24 18:23:51 +02001787#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08001788 .select_task_rq = select_task_rq_rt,
1789
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001790 .set_cpus_allowed = set_cpus_allowed_rt,
Gregory Haskins1f11eb6a2008-06-04 15:04:05 -04001791 .rq_online = rq_online_rt,
1792 .rq_offline = rq_offline_rt,
Steven Rostedt9a897c52008-01-25 21:08:22 +01001793 .pre_schedule = pre_schedule_rt,
1794 .post_schedule = post_schedule_rt,
Peter Zijlstraefbbd052009-12-16 18:04:40 +01001795 .task_woken = task_woken_rt,
Steven Rostedtcb469842008-01-25 21:08:22 +01001796 .switched_from = switched_from_rt,
Peter Williams681f3e62007-10-24 18:23:51 +02001797#endif
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001798
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001799 .set_curr_task = set_curr_task_rt,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001800 .task_tick = task_tick_rt,
Steven Rostedtcb469842008-01-25 21:08:22 +01001801
Peter Williams0d721ce2009-09-21 01:31:53 +00001802 .get_rr_interval = get_rr_interval_rt,
1803
Steven Rostedtcb469842008-01-25 21:08:22 +01001804 .prio_changed = prio_changed_rt,
1805 .switched_to = switched_to_rt,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001806};
Peter Zijlstraada18de2008-06-19 14:22:24 +02001807
1808#ifdef CONFIG_SCHED_DEBUG
1809extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1810
1811static void print_rt_stats(struct seq_file *m, int cpu)
1812{
1813 struct rt_rq *rt_rq;
1814
1815 rcu_read_lock();
1816 for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1817 print_rt_rq(m, cpu, rt_rq);
1818 rcu_read_unlock();
1819}
Dhaval Giani55e12e52008-06-24 23:39:43 +05301820#endif /* CONFIG_SCHED_DEBUG */
Rusty Russell0e3900e2008-11-25 02:35:13 +10301821