/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms, units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length.
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches field)
 *
 * On SMP systems the value of this is multiplied by the log2 of the
 * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
 * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
 */
const_debug unsigned int sysctl_sched_latency = 20000000ULL;

/*
 * After fork, child runs first. (default) If set to 0 then
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 2 msec, units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 25 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;

unsigned int sysctl_sched_runtime_limit __read_mostly;

extern struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

#else	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline void
set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
{
	struct sched_entity *se;

	cfs_rq->rb_leftmost = leftmost;
	if (leftmost) {
		se = rb_entry(leftmost, struct sched_entity, run_node);
		cfs_rq->min_vruntime = max(se->vruntime,
						cfs_rq->min_vruntime);
	}
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void
__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = se->fair_key;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key - entry->fair_key < 0) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		set_leftmost(cfs_rq, &se->run_node);

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
	update_load_add(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running++;
	se->on_rq = 1;

	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}

static void
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node)
		set_leftmost(cfs_rq, rb_next(&se->run_node));

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
	update_load_sub(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running--;
	se->on_rq = 0;

	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency =
		sysctl_sched_latency / sysctl_sched_min_granularity;

	if (unlikely(nr_running > nr_latency)) {
		period *= nr_running;
		do_div(period, nr_latency);
	}

	return period;
}

static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 period = __sched_period(cfs_rq->nr_running);

	period *= se->load.weight;
	do_div(period, cfs_rq->load.weight);

	return period;
}
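
/*
 * Worked example (illustrative only, assuming the default tunables
 * declared above: sysctl_sched_latency = 20ms and
 * sysctl_sched_min_granularity = 2ms, so nr_latency = 10):
 *
 *  -  4 runnable tasks: the period stays at 20ms; if all four share the
 *     same load.weight, sched_slice() hands each of them a 5ms slice.
 *  - 15 runnable tasks: the period stretches to 20ms * 15 / 10 = 30ms,
 *     so 15 equal-weight tasks each get a 2ms slice, i.e. exactly the
 *     minimum granularity.
 */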

static inline void
limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	long limit = sysctl_sched_runtime_limit;

	/*
	 * Niced tasks have the same history dynamic range as
	 * non-niced tasks:
	 */
	if (unlikely(se->wait_runtime > limit)) {
		se->wait_runtime = limit;
		schedstat_inc(se, wait_runtime_overruns);
		schedstat_inc(cfs_rq, wait_runtime_overruns);
	}
	if (unlikely(se->wait_runtime < -limit)) {
		se->wait_runtime = -limit;
		schedstat_inc(se, wait_runtime_underruns);
		schedstat_inc(cfs_rq, wait_runtime_underruns);
	}
}

static inline void
__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
	se->wait_runtime += delta;
	schedstat_add(se, sum_wait_runtime, delta);
	limit_wait_runtime(cfs_rq, se);
}

static void
add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
	__add_wait_runtime(cfs_rq, se, delta);
	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta, delta_fair, delta_mine, delta_exec_weighted;
	struct load_weight *lw = &cfs_rq->load;
	unsigned long load = lw->weight;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	cfs_rq->exec_clock += delta_exec;
	delta_exec_weighted = delta_exec;
	if (unlikely(curr->load.weight != NICE_0_LOAD)) {
		delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
							&curr->load);
	}
	curr->vruntime += delta_exec_weighted;

	if (!sched_feat(FAIR_SLEEPERS))
		return;

	if (unlikely(!load))
		return;

	delta_fair = calc_delta_fair(delta_exec, lw);
	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);

	if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
		delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
		delta = min(delta, (unsigned long)(
			(long)sysctl_sched_runtime_limit - curr->wait_runtime));
		cfs_rq->sleeper_bonus -= delta;
		delta_mine -= delta;
	}

	cfs_rq->fair_clock += delta_fair;
	/*
	 * We executed delta_exec amount of time on the CPU,
	 * but we were only entitled to delta_mine amount of
	 * time during that period (if nr_running == 1 then
	 * the two values are equal)
	 * [Note: delta_mine - delta_exec is negative]:
	 */
	add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
}
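
/*
 * Illustration of the accounting above (assuming two runnable tasks of
 * equal weight, FAIR_SLEEPERS enabled and no sleeper bonus pending): if
 * the current task runs for delta_exec = 4ms, calc_delta_mine() only
 * entitles it to about 2ms of that window, so add_wait_runtime() charges
 * it roughly -2ms, while cfs_rq->fair_clock advances by the weighted
 * delta_fair that the waiting task is later credited with in
 * update_stats_wait_end().
 */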

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->wait_start_fair = cfs_rq->fair_clock;
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

static inline unsigned long
calc_weighted(unsigned long delta, struct sched_entity *se)
{
	unsigned long weight = se->load.weight;

	if (unlikely(weight != NICE_0_LOAD))
		return (u64)delta * se->load.weight >> NICE_0_SHIFT;
	else
		return delta;
}
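
/*
 * Note on the scaling above (NICE_0_SHIFT is assumed to be 10 here, so
 * NICE_0_LOAD == 1024): a nice-0 entity passes delta through unchanged,
 * while e.g. an entity with load.weight == 512 gets
 * delta * 512 >> 10 == delta / 2.
 */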

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
	/*
	 * Update the key:
	 */
	se->fair_key = se->vruntime;
}

/*
 * Note: must be called with a freshly updated rq->fair_clock.
 */
static inline void
__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
			unsigned long delta_fair)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));

	delta_fair = calc_weighted(delta_fair, se);

	add_wait_runtime(cfs_rq, se, delta_fair);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	unsigned long delta_fair;

	if (unlikely(!se->wait_start_fair))
		return;

	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
			(u64)(cfs_rq->fair_clock - se->wait_start_fair));

	__update_stats_wait_end(cfs_rq, se, delta_fair);

	se->wait_start_fair = 0;
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_curr(cfs_rq);
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/*
 * We are descheduling a task - update its stats:
 */
static inline void
update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->exec_start = 0;
}

/**************************************************
 * Scheduling class queueing methods:
 */

static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
			      unsigned long delta_fair)
{
	unsigned long load = cfs_rq->load.weight;
	long prev_runtime;

	/*
	 * Do not boost sleepers if there's too much bonus 'in flight'
	 * already:
	 */
	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
		return;

	if (sched_feat(SLEEPER_LOAD_AVG))
		load = rq_of(cfs_rq)->cpu_load[2];

	/*
	 * Fix up delta_fair with the effect of us running
	 * during the whole sleep period:
	 */
	if (sched_feat(SLEEPER_AVG))
		delta_fair = div64_likely32((u64)delta_fair * load,
						load + se->load.weight);

	delta_fair = calc_weighted(delta_fair, se);

	prev_runtime = se->wait_runtime;
	__add_wait_runtime(cfs_rq, se, delta_fair);
	delta_fair = se->wait_runtime - prev_runtime;

	/*
	 * Track the amount of bonus we've given to sleepers:
	 */
	cfs_rq->sleeper_bonus += delta_fair;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct task_struct *tsk = task_of(se);
	unsigned long delta_fair;

	if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
			!sched_feat(FAIR_SLEEPERS))
		return;

	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
		(u64)(cfs_rq->fair_clock - se->sleep_start_fair));

	__enqueue_sleeper(cfs_rq, se, delta_fair);

	se->sleep_start_fair = 0;

#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		/*
		 * Blocking time is in units of nanosecs, so shift by 20 to
		 * get a milliseconds-range estimation of the amount of
		 * time that the task spent sleeping:
		 */
		if (unlikely(prof_on == SLEEP_PROFILING)) {
			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
				     delta >> 20);
		}
	}
#endif
}

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update the fair clock.
	 */
	update_curr(cfs_rq);

	if (wakeup) {
		u64 min_runtime, latency;

		min_runtime = cfs_rq->min_vruntime;
		min_runtime += sysctl_sched_latency/2;

		if (sched_feat(NEW_FAIR_SLEEPERS)) {
			latency = calc_weighted(sysctl_sched_latency, se);
			if (min_runtime > latency)
				min_runtime -= latency;
		}

		se->vruntime = max(se->vruntime, min_runtime);

		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	__enqueue_entity(cfs_rq, se);
}
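
/*
 * Illustration of the wakeup placement above (numbers assume the 20ms
 * sysctl_sched_latency default and a nice-0 task): a task waking from a
 * long sleep is placed no earlier than min_vruntime + 10ms; with
 * NEW_FAIR_SLEEPERS the weight-scaled latency is subtracted again, so
 * the sleeper typically lands around min_vruntime - 10ms and is likely
 * to preempt soon, without being able to bank an unbounded vruntime
 * lead while asleep.
 */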

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
		se->sleep_start_fair = cfs_rq->fair_clock;
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}
	__dequeue_entity(cfs_rq, se);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
			  struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}
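
/*
 * Example of the check above: with the 5ms slice computed for one of
 * four equal-weight tasks in the earlier example, the running entity is
 * flagged for rescheduling once it has executed more than 5ms since it
 * was last picked (prev_sum_exec_runtime is snapshotted in
 * set_next_entity() below).
 */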

static inline void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Any task has to be enqueued before it gets to execute on
	 * a CPU. So account for the time it spent waiting on the
	 * runqueue. (note, here we rely on pick_next_task() having
	 * done a put_prev_task_fair() shortly before this, which
	 * updated rq->fair_clock - used by update_stats_wait_end())
	 */
	update_stats_wait_end(cfs_rq, se);
	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->ls.load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);

	set_next_entity(cfs_rq, se);

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	update_stats_curr_end(cfs_rq, prev);

	if (prev->on_rq)
		update_stats_wait_start(cfs_rq, prev);
	cfs_rq->curr = NULL;
}

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	struct sched_entity *next;

	/*
	 * Dequeue and enqueue the task to update its
	 * position within the tree:
	 */
	dequeue_entity(cfs_rq, curr, 0);
	enqueue_entity(cfs_rq, curr, 0);

	/*
	 * Reschedule if another task tops the current one.
	 */
	next = __pick_next_entity(cfs_rq);
	if (next == curr)
		return;

	__check_preempt_curr_fair(cfs_rq, next, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	/* A later patch will take group into account */
	return &cpu_rq(this_cpu)->cfs;
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) tasks belong to the same group? */
static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
	if (curr->se.cfs_rq == p->se.cfs_rq)
		return 1;

	return 0;
}

#else	/* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
	return 1;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
	}
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
	}
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct sched_entity *rightmost, *se = &p->se;
	struct rb_node *parent;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	if (likely(!sysctl_sched_compat_yield)) {
		__update_rq_clock(rq);
		/*
		 * Dequeue and enqueue the task to update its
		 * position within the tree:
		 */
		dequeue_entity(cfs_rq, &p->se, 0);
		enqueue_entity(cfs_rq, &p->se, 0);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	do {
		parent = *link;
		link = &parent->rb_right;
	} while (*link);

	rightmost = rb_entry(parent, struct sched_entity, run_node);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(rightmost == se))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 */
	se->fair_key = rightmost->fair_key + 1;

	if (cfs_rq->rb_leftmost == &se->run_node)
		cfs_rq->rb_leftmost = rb_next(&se->run_node);
	/*
	 * Relink the task to the rightmost position:
	 */
	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);

	if (unlikely(rt_prio(p->prio))) {
		update_rq_clock(rq);
		update_curr(cfs_rq);
		resched_task(curr);
		return;
	}

	if (is_same_group(curr, p))
		__check_preempt_curr_fair(cfs_rq, &p->se, &curr->se);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	return task_of(se);
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static inline struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
	struct task_struct *p;

	if (!curr)
		return NULL;

	p = rb_entry(curr, struct task_struct, se.run_node);
	cfs_rq->rb_load_balance_curr = rb_next(curr);

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr;
	struct task_struct *p;

	if (!cfs_rq->nr_running)
		return MAX_PRIO;

	curr = __pick_next_entity(cfs_rq);
	p = task_of(curr);

	return p->prio;
}
#endif

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_nr_move, unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	struct cfs_rq *busy_cfs_rq;
	unsigned long load_moved, total_nr_moved = 0, nr_moved;
	long rem_load_move = max_load_move;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
		struct cfs_rq *this_cfs_rq;
		long imbalance;
		unsigned long maxload;

		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

		imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
		if (imbalance <= 0)
			continue;

		/* Don't pull more than imbalance/2 */
		imbalance /= 2;
		maxload = min(rem_load_move, imbalance);

		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
		/* pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
				max_nr_move, maxload, sd, idle, all_pinned,
				&load_moved, this_best_prio, &cfs_rq_iterator);

		total_nr_moved += nr_moved;
		max_nr_move -= nr_moved;
		rem_load_move -= load_moved;

		if (max_nr_move <= 0 || rem_load_move <= 0)
			break;
	}

	return max_load_move - rem_load_move;
}

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se);
	}
}

#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;

	sched_info_queued(p);

	update_curr(cfs_rq);
	se->vruntime = cfs_rq->min_vruntime;
	update_stats_enqueue(cfs_rq, se);

	/*
	 * The first wait is dominated by the child-runs-first logic,
	 * so do not credit it with that waiting time yet:
	 */
	if (sched_feat(SKIP_INITIAL))
		se->wait_start_fair = 0;

	/*
	 * The statistical average of wait_runtime is about
	 * -granularity/2, so initialize the task with that:
	 */
	if (sched_feat(START_DEBIT))
		se->wait_runtime = -(__sched_period(cfs_rq->nr_running+1) / 2);

	if (sysctl_sched_child_runs_first &&
			curr->vruntime < se->vruntime) {

		dequeue_entity(cfs_rq, curr, 0);
		swap(curr->vruntime, se->vruntime);
		enqueue_entity(cfs_rq, curr, 0);
	}

	update_stats_enqueue(cfs_rq, se);
	__enqueue_entity(cfs_rq, se);
	resched_task(rq->curr);
}
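
/*
 * Illustration of the child-runs-first handling above: the child starts
 * at cfs_rq->min_vruntime (and, with START_DEBIT, owes half a scheduling
 * period of wait_runtime). If the parent's vruntime is still smaller,
 * meaning the parent would otherwise keep running, the two vruntimes are
 * exchanged via swap(), so the freshly forked child becomes the leftmost
 * entity and the final resched_task() lets it run first.
 */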

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}
#else
static void set_curr_task_fair(struct rq *rq)
{
}
#endif

/*
 * All the scheduling class methods:
 */
struct sched_class fair_sched_class __read_mostly = {
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_curr_fair,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

	.load_balance		= load_balance_fair,

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
}
#endif