Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * kernel/sched.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
Ingo Molnarc31f2e82007-07-09 18:52:01 +020019 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
Ingo Molnarb9131762008-01-25 21:08:19 +010025 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
Linus Torvalds1da177e2005-04-16 15:20:36 -070027 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
Ingo Molnardff06c12007-07-09 18:52:00 +020033#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/highmem.h>
35#include <linux/smp_lock.h>
36#include <asm/mmu_context.h>
37#include <linux/interrupt.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080038#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/completion.h>
40#include <linux/kernel_stat.h>
Ingo Molnar9a11b49a2006-07-03 00:24:33 -070041#include <linux/debug_locks.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080045#include <linux/freezer.h>
akpm@osdl.org198e2f12006-01-12 01:05:30 -080046#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <linux/blkdev.h>
48#include <linux/delay.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070049#include <linux/pid_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
57#include <linux/kthread.h>
58#include <linux/seq_file.h>
Nick Piggine692ab52007-07-26 13:40:43 +020059#include <linux/sysctl.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060#include <linux/syscalls.h>
61#include <linux/times.h>
Jay Lan8f0ab512006-09-30 23:28:59 -070062#include <linux/tsacct_kern.h>
bibo maoc6fd91f2006-03-26 01:38:20 -080063#include <linux/kprobes.h>
Shailabh Nagar0ff92242006-07-14 00:24:37 -070064#include <linux/delayacct.h>
Eric Dumazet5517d862007-05-08 00:32:57 -070065#include <linux/reciprocal_div.h>
Ingo Molnardff06c12007-07-09 18:52:00 +020066#include <linux/unistd.h>
Jens Axboef5ff8422007-09-21 09:19:54 +020067#include <linux/pagemap.h>
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +010068#include <linux/hrtimer.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
Eric Dumazet5517d862007-05-08 00:32:57 -070070#include <asm/tlb.h>
Satyam Sharma838225b2007-10-24 18:23:50 +020071#include <asm/irq_regs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070072
73/*
Alexey Dobriyanb035b6d2007-02-10 01:45:10 -080074 * Scheduler clock - returns current time in nanosec units.
 75 * This is the default implementation.
76 * Architectures and sub-architectures can override this.
77 */
78unsigned long long __attribute__((weak)) sched_clock(void)
79{
Eric Dumazetd6322fa2007-11-09 22:39:38 +010080 return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
Alexey Dobriyanb035b6d2007-02-10 01:45:10 -080081}
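/*
 * Illustrative note (not part of the original file): because this weak
 * default is jiffies-based, with HZ == 1000 it only advances in steps of
 * NSEC_PER_SEC / HZ = 1,000,000 ns, i.e. 1 ms resolution.  Architectures
 * that provide their own sched_clock() typically do much better than that.
 */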
82
83/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070084 * Convert user-nice values [ -20 ... 0 ... 19 ]
85 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
86 * and back.
87 */
88#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
89#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
90#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
91
92/*
93 * 'User priority' is the nice value converted to something we
94 * can work with better when scaling various scheduler parameters,
95 * it's a [ 0 ... 39 ] range.
96 */
97#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
98#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
99#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
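/*
 * Worked example for the conversions above (illustrative, assuming the
 * usual MAX_RT_PRIO == 100 and MAX_PRIO == 140):
 *
 *	NICE_TO_PRIO(-20) == 100	NICE_TO_PRIO(0)   == 120
 *	NICE_TO_PRIO(19)  == 139	PRIO_TO_NICE(120) == 0
 *	USER_PRIO(120)    == 20		MAX_USER_PRIO     == 40
 */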
100
101/*
Ingo Molnard7876a02008-01-25 21:08:19 +0100102 * Helpers for converting nanosecond timing to jiffy resolution
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103 */
Eric Dumazetd6322fa2007-11-09 22:39:38 +0100104#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
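/*
 * Worked example (illustrative): with HZ == 1000, NSEC_PER_SEC / HZ is
 * 1,000,000, so NS_TO_JIFFIES(5000000) == 5 -- nanoseconds rounded down
 * to whole ticks.
 */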
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200106#define NICE_0_LOAD SCHED_LOAD_SCALE
107#define NICE_0_SHIFT SCHED_LOAD_SHIFT
108
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109/*
110 * These are the 'tuning knobs' of the scheduler:
111 *
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +0200112 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113 * Timeslices get refilled after they expire.
114 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115#define DEF_TIMESLICE (100 * HZ / 1000)
Peter Williams2dd73a42006-06-27 02:54:34 -0700116
Eric Dumazet5517d862007-05-08 00:32:57 -0700117#ifdef CONFIG_SMP
118/*
 119 * Divide a load by a sched group cpu_power: (load / sg->__cpu_power)
120 * Since cpu_power is a 'constant', we can use a reciprocal divide.
121 */
122static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
123{
124 return reciprocal_divide(load, sg->reciprocal_cpu_power);
125}
126
127/*
128 * Each time a sched group cpu_power is changed,
129 * we must compute its reciprocal value
130 */
131static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
132{
133 sg->__cpu_power += val;
134 sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
135}
136#endif
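/*
 * Sketch of the reciprocal-divide trick used above (illustrative, not part
 * of the original file): reciprocal_value(d) precomputes roughly 2^32 / d
 * once, so that reciprocal_divide(x, sg->reciprocal_cpu_power) can return
 *
 *	(u32)(((u64)x * sg->reciprocal_cpu_power) >> 32)
 *
 * i.e. a multiply and a shift instead of a hardware divide on every
 * load-balancing calculation.
 */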
137
Ingo Molnare05606d2007-07-09 18:51:59 +0200138static inline int rt_policy(int policy)
139{
140 if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
141 return 1;
142 return 0;
143}
144
145static inline int task_has_rt_policy(struct task_struct *p)
146{
147 return rt_policy(p->policy);
148}
149
Linus Torvalds1da177e2005-04-16 15:20:36 -0700150/*
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200151 * This is the priority-queue data structure of the RT scheduling class:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700152 */
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200153struct rt_prio_array {
154 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
155 struct list_head queue[MAX_RT_PRIO];
156};
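/*
 * Illustrative sketch (not part of the original file): with this layout,
 * picking the highest-priority queued RT entity is O(1) regardless of how
 * many tasks are runnable, roughly:
 *
 *	idx = sched_find_first_bit(array->bitmap);
 *	next = list_entry(array->queue[idx].next,
 *			  struct sched_rt_entity, run_list);
 */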
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200158#ifdef CONFIG_FAIR_GROUP_SCHED
159
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700160#include <linux/cgroup.h>
161
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200162struct cfs_rq;
163
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100164static LIST_HEAD(task_groups);
165
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200166/* task group related information */
Ingo Molnar4cf86d72007-10-15 17:00:14 +0200167struct task_group {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700168#ifdef CONFIG_FAIR_CGROUP_SCHED
169 struct cgroup_subsys_state css;
170#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200171 /* schedulable entities of this group on each cpu */
172 struct sched_entity **se;
173 /* runqueue "owned" by this group on each cpu */
174 struct cfs_rq **cfs_rq;
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +0100175
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100176 struct sched_rt_entity **rt_se;
177 struct rt_rq **rt_rq;
178
179 unsigned int rt_ratio;
180
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +0100181 /*
 182 * The shares assigned to a task group govern how much CPU bandwidth
 183 * is allocated to the group. The more shares a group has, the more
 184 * CPU bandwidth is allocated to it.
185 *
 186 * For example, let's say that there are three task groups, A, B and C, which
187 * have been assigned shares 1000, 2000 and 3000 respectively. Then,
188 * cpu bandwidth allocated by the scheduler to task groups A, B and C
189 * should be:
190 *
191 * Bw(A) = 1000/(1000+2000+3000) * 100 = 16.66%
192 * Bw(B) = 2000/(1000+2000+3000) * 100 = 33.33%
Ingo Molnar03319ec2008-01-25 21:08:28 +0100193 * Bw(C) = 3000/(1000+2000+3000) * 100 = 50%
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +0100194 *
195 * The weight assigned to a task group's schedulable entities on every
196 * cpu (task_group.se[a_cpu]->load.weight) is derived from the task
 197 * group's shares. For example, let's say that task group A has been
198 * assigned shares of 1000 and there are two CPUs in a system. Then,
199 *
200 * tg_A->se[0]->load.weight = tg_A->se[1]->load.weight = 1000;
201 *
 202 * Note: It's not necessary that each of a task group's schedulable
Ingo Molnar03319ec2008-01-25 21:08:28 +0100203 * entities has the same weight on all CPUs. If the group
204 * has 2 of its tasks on CPU0 and 1 task on CPU1, then a
205 * better distribution of weight could be:
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +0100206 *
207 * tg_A->se[0]->load.weight = 2/3 * 2000 = 1333
 208 * tg_A->se[1]->load.weight = 1/3 * 2000 = 667
209 *
210 * rebalance_shares() is responsible for distributing the shares of a
 211 * task group like this among the group's schedulable entities across
212 * cpus.
213 *
214 */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200215 unsigned long shares;
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +0100216
Srivatsa Vaddagiriae8393e2007-10-29 21:18:11 +0100217 struct rcu_head rcu;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100218 struct list_head list;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200219};
220
221/* Default task group's sched entity on each cpu */
222static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
223/* Default task group's cfs_rq on each cpu */
224static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
225
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100226static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
227static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
228
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +0200229static struct sched_entity *init_sched_entity_p[NR_CPUS];
230static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200231
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100232static struct sched_rt_entity *init_sched_rt_entity_p[NR_CPUS];
233static struct rt_rq *init_rt_rq_p[NR_CPUS];
234
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +0100235/* task_group_mutex serializes add/remove of task groups and also changes to
236 * a task group's cpu shares.
237 */
238static DEFINE_MUTEX(task_group_mutex);
239
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +0100240/* doms_cur_mutex serializes access to doms_cur[] array */
241static DEFINE_MUTEX(doms_cur_mutex);
242
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +0100243#ifdef CONFIG_SMP
244/* kernel thread that runs rebalance_shares() periodically */
245static struct task_struct *lb_monitor_task;
246static int load_balance_monitor(void *unused);
247#endif
248
249static void set_se_shares(struct sched_entity *se, unsigned long shares);
250
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200251/* Default task group.
Ingo Molnar3a252012007-10-15 17:00:12 +0200252 * Every task in the system belongs to this group at bootup.
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200253 */
Ingo Molnar4cf86d72007-10-15 17:00:14 +0200254struct task_group init_task_group = {
Ingo Molnar0eab9142008-01-25 21:08:19 +0100255 .se = init_sched_entity_p,
Ingo Molnar3a252012007-10-15 17:00:12 +0200256 .cfs_rq = init_cfs_rq_p,
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100257
258 .rt_se = init_sched_rt_entity_p,
259 .rt_rq = init_rt_rq_p,
Ingo Molnar3a252012007-10-15 17:00:12 +0200260};
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +0200261
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200262#ifdef CONFIG_FAIR_USER_SCHED
Ingo Molnar0eab9142008-01-25 21:08:19 +0100263# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200264#else
Srivatsa Vaddagiri93f992c2008-01-25 21:07:59 +0100265# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200266#endif
267
Ingo Molnar0eab9142008-01-25 21:08:19 +0100268#define MIN_GROUP_SHARES 2
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +0100269
Srivatsa Vaddagiri93f992c2008-01-25 21:07:59 +0100270static int init_task_group_load = INIT_TASK_GROUP_LOAD;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200271
272/* return group to which a task belongs */
Ingo Molnar4cf86d72007-10-15 17:00:14 +0200273static inline struct task_group *task_group(struct task_struct *p)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200274{
Ingo Molnar4cf86d72007-10-15 17:00:14 +0200275 struct task_group *tg;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +0200276
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200277#ifdef CONFIG_FAIR_USER_SCHED
278 tg = p->user->tg;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700279#elif defined(CONFIG_FAIR_CGROUP_SCHED)
280 tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
281 struct task_group, css);
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200282#else
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100283 tg = &init_task_group;
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200284#endif
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +0200285 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200286}
287
288/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100289static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200290{
Dmitry Adamushkoce96b5a2007-11-15 20:57:40 +0100291 p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
292 p->se.parent = task_group(p)->se[cpu];
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100293
294 p->rt.rt_rq = task_group(p)->rt_rq[cpu];
295 p->rt.parent = task_group(p)->rt_se[cpu];
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200296}
297
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +0100298static inline void lock_task_group_list(void)
299{
300 mutex_lock(&task_group_mutex);
301}
302
303static inline void unlock_task_group_list(void)
304{
305 mutex_unlock(&task_group_mutex);
306}
307
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +0100308static inline void lock_doms_cur(void)
309{
310 mutex_lock(&doms_cur_mutex);
311}
312
313static inline void unlock_doms_cur(void)
314{
315 mutex_unlock(&doms_cur_mutex);
316}
317
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200318#else
319
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100320static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +0100321static inline void lock_task_group_list(void) { }
322static inline void unlock_task_group_list(void) { }
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +0100323static inline void lock_doms_cur(void) { }
324static inline void unlock_doms_cur(void) { }
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200325
326#endif /* CONFIG_FAIR_GROUP_SCHED */
327
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200328/* CFS-related fields in a runqueue */
329struct cfs_rq {
330 struct load_weight load;
331 unsigned long nr_running;
332
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200333 u64 exec_clock;
Ingo Molnare9acbff2007-10-15 17:00:04 +0200334 u64 min_vruntime;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200335
336 struct rb_root tasks_timeline;
337 struct rb_node *rb_leftmost;
338 struct rb_node *rb_load_balance_curr;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200339 /* 'curr' points to currently running entity on this cfs_rq.
 340 * It is set to NULL otherwise (i.e., when none are currently running).
341 */
342 struct sched_entity *curr;
Peter Zijlstraddc97292007-10-15 17:00:10 +0200343
344 unsigned long nr_spread_over;
345
Ingo Molnar62160e32007-10-15 17:00:03 +0200346#ifdef CONFIG_FAIR_GROUP_SCHED
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200347 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
348
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100349 /*
350 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200351 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
352 * (like users, containers etc.)
353 *
354 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
355 * list is used during load balance.
356 */
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100357 struct list_head leaf_cfs_rq_list;
358 struct task_group *tg; /* group that "owns" this runqueue */
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200359#endif
360};
361
362/* Real-Time classes' related field in a runqueue: */
363struct rt_rq {
364 struct rt_prio_array active;
Steven Rostedt63489e42008-01-25 21:08:03 +0100365 unsigned long rt_nr_running;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100366#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
367 int highest_prio; /* highest queued rt task prio */
368#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100369#ifdef CONFIG_SMP
Gregory Haskins73fe6aa2008-01-25 21:08:07 +0100370 unsigned long rt_nr_migratory;
Gregory Haskinsa22d7fc2008-01-25 21:08:12 +0100371 int overloaded;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100372#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100373 int rt_throttled;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100374 u64 rt_time;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100375
376#ifdef CONFIG_FAIR_GROUP_SCHED
377 struct rq *rq;
378 struct list_head leaf_rt_rq_list;
379 struct task_group *tg;
380 struct sched_rt_entity *rt_se;
381#endif
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200382};
383
Gregory Haskins57d885f2008-01-25 21:08:18 +0100384#ifdef CONFIG_SMP
385
386/*
387 * We add the notion of a root-domain which will be used to define per-domain
Ingo Molnar0eab9142008-01-25 21:08:19 +0100388 * variables. Each exclusive cpuset essentially defines an island domain by
389 * fully partitioning the member cpus from any other cpuset. Whenever a new
Gregory Haskins57d885f2008-01-25 21:08:18 +0100390 * exclusive cpuset is created, we also create and attach a new root-domain
391 * object.
392 *
Gregory Haskins57d885f2008-01-25 21:08:18 +0100393 */
394struct root_domain {
395 atomic_t refcount;
396 cpumask_t span;
397 cpumask_t online;
Gregory Haskins637f5082008-01-25 21:08:18 +0100398
Ingo Molnar0eab9142008-01-25 21:08:19 +0100399 /*
Gregory Haskins637f5082008-01-25 21:08:18 +0100400 * The "RT overload" flag: it gets set if a CPU has more than
401 * one runnable RT task.
402 */
403 cpumask_t rto_mask;
Ingo Molnar0eab9142008-01-25 21:08:19 +0100404 atomic_t rto_count;
Gregory Haskins57d885f2008-01-25 21:08:18 +0100405};
406
Gregory Haskinsdc938522008-01-25 21:08:26 +0100407/*
408 * By default the system creates a single root-domain with all cpus as
409 * members (mimicking the global state we have today).
410 */
Gregory Haskins57d885f2008-01-25 21:08:18 +0100411static struct root_domain def_root_domain;
412
413#endif
414
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200415/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416 * This is the main, per-CPU runqueue data structure.
417 *
 418 * Locking rule: in places that want to lock multiple runqueues
 419 * (such as the load balancing or the thread migration code), lock
 420 * acquire operations must be ordered by ascending runqueue address.
421 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700422struct rq {
Ingo Molnard8016492007-10-18 21:32:55 +0200423 /* runqueue lock: */
424 spinlock_t lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700425
426 /*
427 * nr_running and cpu_load should be in the same cacheline because
428 * remote CPUs use both these fields when doing load calculation.
429 */
430 unsigned long nr_running;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200431 #define CPU_LOAD_IDX_MAX 5
432 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
Siddha, Suresh Bbdecea32007-05-08 00:32:48 -0700433 unsigned char idle_at_tick;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -0700434#ifdef CONFIG_NO_HZ
435 unsigned char in_nohz_recently;
436#endif
Ingo Molnard8016492007-10-18 21:32:55 +0200437 /* capture load from *all* tasks on this cpu: */
438 struct load_weight load;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200439 unsigned long nr_load_updates;
440 u64 nr_switches;
441
442 struct cfs_rq cfs;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100443 struct rt_rq rt;
444 u64 rt_period_expire;
Peter Zijlstra48d5e252008-01-25 21:08:31 +0100445 int rt_throttled;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100446
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200447#ifdef CONFIG_FAIR_GROUP_SCHED
Ingo Molnard8016492007-10-18 21:32:55 +0200448 /* list of leaf cfs_rq on this cpu: */
449 struct list_head leaf_cfs_rq_list;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100450 struct list_head leaf_rt_rq_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700451#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452
453 /*
454 * This is part of a global counter where only the total sum
455 * over all CPUs matters. A task can increase this counter on
456 * one CPU and if it got migrated afterwards it may decrease
457 * it on another CPU. Always updated under the runqueue lock:
458 */
459 unsigned long nr_uninterruptible;
460
Ingo Molnar36c8b582006-07-03 00:25:41 -0700461 struct task_struct *curr, *idle;
Christoph Lameterc9819f42006-12-10 02:20:25 -0800462 unsigned long next_balance;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463 struct mm_struct *prev_mm;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200464
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200465 u64 clock, prev_clock_raw;
466 s64 clock_max_delta;
467
468 unsigned int clock_warps, clock_overflows;
Ingo Molnar2aa44d02007-08-23 15:18:02 +0200469 u64 idle_clock;
470 unsigned int clock_deep_idle_events;
Ingo Molnar529c7722007-08-10 23:05:11 +0200471 u64 tick_timestamp;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200472
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473 atomic_t nr_iowait;
474
475#ifdef CONFIG_SMP
Ingo Molnar0eab9142008-01-25 21:08:19 +0100476 struct root_domain *rd;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700477 struct sched_domain *sd;
478
479 /* For active balancing */
480 int active_balance;
481 int push_cpu;
Ingo Molnard8016492007-10-18 21:32:55 +0200482 /* cpu of this runqueue: */
483 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484
Ingo Molnar36c8b582006-07-03 00:25:41 -0700485 struct task_struct *migration_thread;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486 struct list_head migration_queue;
487#endif
488
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100489#ifdef CONFIG_SCHED_HRTICK
490 unsigned long hrtick_flags;
491 ktime_t hrtick_expire;
492 struct hrtimer hrtick_timer;
493#endif
494
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495#ifdef CONFIG_SCHEDSTATS
496 /* latency stats */
497 struct sched_info rq_sched_info;
498
499 /* sys_sched_yield() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200500 unsigned int yld_exp_empty;
501 unsigned int yld_act_empty;
502 unsigned int yld_both_empty;
503 unsigned int yld_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700504
505 /* schedule() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200506 unsigned int sched_switch;
507 unsigned int sched_count;
508 unsigned int sched_goidle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509
510 /* try_to_wake_up() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200511 unsigned int ttwu_count;
512 unsigned int ttwu_local;
Ingo Molnarb8efb562007-10-15 17:00:10 +0200513
514 /* BKL stats */
Ken Chen480b9432007-10-18 21:32:56 +0200515 unsigned int bkl_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516#endif
Ingo Molnarfcb99372006-07-03 00:25:10 -0700517 struct lock_class_key rq_lock_key;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518};
519
Fenghua Yuf34e3b62007-07-19 01:48:13 -0700520static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700521
Ingo Molnardd41f592007-07-09 18:51:59 +0200522static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
523{
524 rq->curr->sched_class->check_preempt_curr(rq, p);
525}
526
Christoph Lameter0a2966b2006-09-25 23:30:51 -0700527static inline int cpu_of(struct rq *rq)
528{
529#ifdef CONFIG_SMP
530 return rq->cpu;
531#else
532 return 0;
533#endif
534}
535
Nick Piggin674311d2005-06-25 14:57:27 -0700536/*
Ingo Molnarb04a0f42007-08-09 11:16:46 +0200537 * Update the per-runqueue clock, as fine-grained as the platform can give
538 * us, but without assuming monotonicity, etc.:
Ingo Molnar20d315d2007-07-09 18:51:58 +0200539 */
Ingo Molnarb04a0f42007-08-09 11:16:46 +0200540static void __update_rq_clock(struct rq *rq)
Ingo Molnar20d315d2007-07-09 18:51:58 +0200541{
542 u64 prev_raw = rq->prev_clock_raw;
543 u64 now = sched_clock();
544 s64 delta = now - prev_raw;
545 u64 clock = rq->clock;
546
Ingo Molnarb04a0f42007-08-09 11:16:46 +0200547#ifdef CONFIG_SCHED_DEBUG
548 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
549#endif
Ingo Molnar20d315d2007-07-09 18:51:58 +0200550 /*
551 * Protect against sched_clock() occasionally going backwards:
552 */
553 if (unlikely(delta < 0)) {
554 clock++;
555 rq->clock_warps++;
556 } else {
557 /*
558 * Catch too large forward jumps too:
559 */
Ingo Molnar529c7722007-08-10 23:05:11 +0200560 if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
561 if (clock < rq->tick_timestamp + TICK_NSEC)
562 clock = rq->tick_timestamp + TICK_NSEC;
563 else
564 clock++;
Ingo Molnar20d315d2007-07-09 18:51:58 +0200565 rq->clock_overflows++;
566 } else {
567 if (unlikely(delta > rq->clock_max_delta))
568 rq->clock_max_delta = delta;
569 clock += delta;
570 }
571 }
572
573 rq->prev_clock_raw = now;
574 rq->clock = clock;
Ingo Molnar20d315d2007-07-09 18:51:58 +0200575}
576
Ingo Molnarb04a0f42007-08-09 11:16:46 +0200577static void update_rq_clock(struct rq *rq)
Ingo Molnar20d315d2007-07-09 18:51:58 +0200578{
Ingo Molnarb04a0f42007-08-09 11:16:46 +0200579 if (likely(smp_processor_id() == cpu_of(rq)))
580 __update_rq_clock(rq);
581}
Ingo Molnar20d315d2007-07-09 18:51:58 +0200582
Ingo Molnar20d315d2007-07-09 18:51:58 +0200583/*
Nick Piggin674311d2005-06-25 14:57:27 -0700584 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -0700585 * See detach_destroy_domains: synchronize_sched for details.
Nick Piggin674311d2005-06-25 14:57:27 -0700586 *
587 * The domain tree of any CPU may only be accessed from within
588 * preempt-disabled sections.
589 */
Ingo Molnar48f24c42006-07-03 00:25:40 -0700590#define for_each_domain(cpu, __sd) \
591 for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592
593#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
594#define this_rq() (&__get_cpu_var(runqueues))
595#define task_rq(p) cpu_rq(task_cpu(p))
596#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
597
Peter Zijlstra48d5e252008-01-25 21:08:31 +0100598unsigned long rt_needs_cpu(int cpu)
599{
600 struct rq *rq = cpu_rq(cpu);
601 u64 delta;
602
603 if (!rq->rt_throttled)
604 return 0;
605
606 if (rq->clock > rq->rt_period_expire)
607 return 1;
608
609 delta = rq->rt_period_expire - rq->clock;
610 do_div(delta, NSEC_PER_SEC / HZ);
611
612 return (unsigned long)delta;
613}
614
Ingo Molnare436d802007-07-19 21:28:35 +0200615/*
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200616 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
617 */
618#ifdef CONFIG_SCHED_DEBUG
619# define const_debug __read_mostly
620#else
621# define const_debug static const
622#endif
623
624/*
625 * Debugging: various feature bits
626 */
627enum {
Ingo Molnarbbdba7c2007-10-15 17:00:06 +0200628 SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
Ingo Molnar96126332007-11-15 20:57:40 +0100629 SCHED_FEAT_WAKEUP_PREEMPT = 2,
630 SCHED_FEAT_START_DEBIT = 4,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100631 SCHED_FEAT_TREE_AVG = 8,
632 SCHED_FEAT_APPROX_AVG = 16,
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100633 SCHED_FEAT_HRTICK = 32,
634 SCHED_FEAT_DOUBLE_TICK = 64,
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200635};
636
637const_debug unsigned int sysctl_sched_features =
Ingo Molnar8401f772007-10-18 21:32:55 +0200638 SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
Ingo Molnar96126332007-11-15 20:57:40 +0100639 SCHED_FEAT_WAKEUP_PREEMPT * 1 |
Ingo Molnar8401f772007-10-18 21:32:55 +0200640 SCHED_FEAT_START_DEBIT * 1 |
641 SCHED_FEAT_TREE_AVG * 0 |
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100642 SCHED_FEAT_APPROX_AVG * 0 |
643 SCHED_FEAT_HRTICK * 1 |
644 SCHED_FEAT_DOUBLE_TICK * 0;
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200645
646#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
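/*
 * Usage example (illustrative): sched_feat(HRTICK) expands to
 * (sysctl_sched_features & SCHED_FEAT_HRTICK), which is non-zero with the
 * default mask above because SCHED_FEAT_HRTICK is multiplied by 1 there.
 */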
647
648/*
Peter Zijlstrab82d9fd2007-11-09 22:39:39 +0100649 * Number of tasks to iterate in a single balance run.
650 * Limited because this is done with IRQs disabled.
651 */
652const_debug unsigned int sysctl_sched_nr_migrate = 32;
653
654/*
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100655 * period over which we measure -rt task cpu usage in ms.
656 * default: 1s
657 */
658const_debug unsigned int sysctl_sched_rt_period = 1000;
659
660#define SCHED_RT_FRAC_SHIFT 16
661#define SCHED_RT_FRAC (1UL << SCHED_RT_FRAC_SHIFT)
662
663/*
664 * ratio of time -rt tasks may consume.
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100665 * default: 95%
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100666 */
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100667const_debug unsigned int sysctl_sched_rt_ratio = 62259;
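/*
 * Arithmetic behind the default above (illustrative): the ratio is a
 * 16-bit fixed-point fraction, so 62259 / (1 << SCHED_RT_FRAC_SHIFT) =
 * 62259 / 65536 ~= 0.95, i.e. the documented ~95%.
 */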
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100668
669/*
Ingo Molnare436d802007-07-19 21:28:35 +0200670 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
671 * clock constructed from sched_clock():
672 */
673unsigned long long cpu_clock(int cpu)
674{
Ingo Molnare436d802007-07-19 21:28:35 +0200675 unsigned long long now;
676 unsigned long flags;
Ingo Molnarb04a0f42007-08-09 11:16:46 +0200677 struct rq *rq;
Ingo Molnare436d802007-07-19 21:28:35 +0200678
Ingo Molnar2cd4d0e2007-07-26 13:40:43 +0200679 local_irq_save(flags);
Ingo Molnarb04a0f42007-08-09 11:16:46 +0200680 rq = cpu_rq(cpu);
Ingo Molnar8ced5f62007-12-07 19:02:47 +0100681 /*
682 * Only call sched_clock() if the scheduler has already been
683 * initialized (some code might call cpu_clock() very early):
684 */
685 if (rq->idle)
686 update_rq_clock(rq);
Ingo Molnarb04a0f42007-08-09 11:16:46 +0200687 now = rq->clock;
Ingo Molnar2cd4d0e2007-07-26 13:40:43 +0200688 local_irq_restore(flags);
Ingo Molnare436d802007-07-19 21:28:35 +0200689
690 return now;
691}
Paul E. McKenneya58f6f22007-10-15 17:00:14 +0200692EXPORT_SYMBOL_GPL(cpu_clock);
Ingo Molnare436d802007-07-19 21:28:35 +0200693
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694#ifndef prepare_arch_switch
Nick Piggin4866cde2005-06-25 14:57:23 -0700695# define prepare_arch_switch(next) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700696#endif
Nick Piggin4866cde2005-06-25 14:57:23 -0700697#ifndef finish_arch_switch
698# define finish_arch_switch(prev) do { } while (0)
699#endif
700
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100701static inline int task_current(struct rq *rq, struct task_struct *p)
702{
703 return rq->curr == p;
704}
705
Nick Piggin4866cde2005-06-25 14:57:23 -0700706#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar70b97a72006-07-03 00:25:42 -0700707static inline int task_running(struct rq *rq, struct task_struct *p)
Nick Piggin4866cde2005-06-25 14:57:23 -0700708{
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100709 return task_current(rq, p);
Nick Piggin4866cde2005-06-25 14:57:23 -0700710}
711
Ingo Molnar70b97a72006-07-03 00:25:42 -0700712static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -0700713{
714}
715
Ingo Molnar70b97a72006-07-03 00:25:42 -0700716static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
Nick Piggin4866cde2005-06-25 14:57:23 -0700717{
Ingo Molnarda04c032005-09-13 11:17:59 +0200718#ifdef CONFIG_DEBUG_SPINLOCK
719 /* this is a valid case when another task releases the spinlock */
720 rq->lock.owner = current;
721#endif
Ingo Molnar8a25d5d2006-07-03 00:24:54 -0700722 /*
723 * If we are tracking spinlock dependencies then we have to
724 * fix up the runqueue lock - which gets 'carried over' from
725 * prev into current:
726 */
727 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
728
Nick Piggin4866cde2005-06-25 14:57:23 -0700729 spin_unlock_irq(&rq->lock);
730}
731
732#else /* __ARCH_WANT_UNLOCKED_CTXSW */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700733static inline int task_running(struct rq *rq, struct task_struct *p)
Nick Piggin4866cde2005-06-25 14:57:23 -0700734{
735#ifdef CONFIG_SMP
736 return p->oncpu;
737#else
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100738 return task_current(rq, p);
Nick Piggin4866cde2005-06-25 14:57:23 -0700739#endif
740}
741
Ingo Molnar70b97a72006-07-03 00:25:42 -0700742static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -0700743{
744#ifdef CONFIG_SMP
745 /*
746 * We can optimise this out completely for !SMP, because the
747 * SMP rebalancing from interrupt is the only thing that cares
748 * here.
749 */
750 next->oncpu = 1;
751#endif
752#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
753 spin_unlock_irq(&rq->lock);
754#else
755 spin_unlock(&rq->lock);
756#endif
757}
758
Ingo Molnar70b97a72006-07-03 00:25:42 -0700759static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
Nick Piggin4866cde2005-06-25 14:57:23 -0700760{
761#ifdef CONFIG_SMP
762 /*
763 * After ->oncpu is cleared, the task can be moved to a different CPU.
764 * We must ensure this doesn't happen until the switch is completely
765 * finished.
766 */
767 smp_wmb();
768 prev->oncpu = 0;
769#endif
770#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
771 local_irq_enable();
772#endif
773}
774#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775
776/*
Ingo Molnarb29739f2006-06-27 02:54:51 -0700777 * __task_rq_lock - lock the runqueue a given task resides on.
 778 * Must be called with interrupts disabled.
779 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700780static inline struct rq *__task_rq_lock(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -0700781 __acquires(rq->lock)
782{
Andi Kleen3a5c3592007-10-15 17:00:14 +0200783 for (;;) {
784 struct rq *rq = task_rq(p);
785 spin_lock(&rq->lock);
786 if (likely(rq == task_rq(p)))
787 return rq;
Ingo Molnarb29739f2006-06-27 02:54:51 -0700788 spin_unlock(&rq->lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -0700789 }
Ingo Molnarb29739f2006-06-27 02:54:51 -0700790}
791
792/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793 * task_rq_lock - lock the runqueue a given task resides on and disable
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100794 * interrupts. Note the ordering: we can safely lookup the task_rq without
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 * explicitly disabling preemption.
796 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700797static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700798 __acquires(rq->lock)
799{
Ingo Molnar70b97a72006-07-03 00:25:42 -0700800 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801
Andi Kleen3a5c3592007-10-15 17:00:14 +0200802 for (;;) {
803 local_irq_save(*flags);
804 rq = task_rq(p);
805 spin_lock(&rq->lock);
806 if (likely(rq == task_rq(p)))
807 return rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700808 spin_unlock_irqrestore(&rq->lock, *flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700809 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810}
811
Alexey Dobriyana9957442007-10-15 17:00:13 +0200812static void __task_rq_unlock(struct rq *rq)
Ingo Molnarb29739f2006-06-27 02:54:51 -0700813 __releases(rq->lock)
814{
815 spin_unlock(&rq->lock);
816}
817
Ingo Molnar70b97a72006-07-03 00:25:42 -0700818static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 __releases(rq->lock)
820{
821 spin_unlock_irqrestore(&rq->lock, *flags);
822}
823
Linus Torvalds1da177e2005-04-16 15:20:36 -0700824/*
Robert P. J. Daycc2a73b2006-12-10 02:20:00 -0800825 * this_rq_lock - lock this runqueue and disable interrupts.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700826 */
Alexey Dobriyana9957442007-10-15 17:00:13 +0200827static struct rq *this_rq_lock(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700828 __acquires(rq->lock)
829{
Ingo Molnar70b97a72006-07-03 00:25:42 -0700830 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831
832 local_irq_disable();
833 rq = this_rq();
834 spin_lock(&rq->lock);
835
836 return rq;
837}
838
Ingo Molnarc24d20d2007-07-09 18:51:59 +0200839/*
Ingo Molnar2aa44d02007-08-23 15:18:02 +0200840 * We are going deep-idle (irqs are disabled):
Ingo Molnar1b9f19c2007-07-09 18:51:59 +0200841 */
Ingo Molnar2aa44d02007-08-23 15:18:02 +0200842void sched_clock_idle_sleep_event(void)
Ingo Molnar1b9f19c2007-07-09 18:51:59 +0200843{
Ingo Molnar2aa44d02007-08-23 15:18:02 +0200844 struct rq *rq = cpu_rq(smp_processor_id());
Ingo Molnar1b9f19c2007-07-09 18:51:59 +0200845
Ingo Molnar2aa44d02007-08-23 15:18:02 +0200846 spin_lock(&rq->lock);
847 __update_rq_clock(rq);
848 spin_unlock(&rq->lock);
849 rq->clock_deep_idle_events++;
Ingo Molnar1b9f19c2007-07-09 18:51:59 +0200850}
Ingo Molnar2aa44d02007-08-23 15:18:02 +0200851EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
852
853/*
854 * We just idled delta nanoseconds (called with irqs disabled):
855 */
856void sched_clock_idle_wakeup_event(u64 delta_ns)
857{
858 struct rq *rq = cpu_rq(smp_processor_id());
859 u64 now = sched_clock();
860
Ingo Molnar2bacec82007-12-18 15:21:13 +0100861 touch_softlockup_watchdog();
Ingo Molnar2aa44d02007-08-23 15:18:02 +0200862 rq->idle_clock += delta_ns;
863 /*
864 * Override the previous timestamp and ignore all
 865 * sched_clock() deltas that occurred while we idled,
866 * and use the PM-provided delta_ns to advance the
867 * rq clock:
868 */
869 spin_lock(&rq->lock);
870 rq->prev_clock_raw = now;
871 rq->clock += delta_ns;
872 spin_unlock(&rq->lock);
873}
874EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
Ingo Molnar1b9f19c2007-07-09 18:51:59 +0200875
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100876static void __resched_task(struct task_struct *p, int tif_bit);
877
878static inline void resched_task(struct task_struct *p)
879{
880 __resched_task(p, TIF_NEED_RESCHED);
881}
882
883#ifdef CONFIG_SCHED_HRTICK
884/*
885 * Use HR-timers to deliver accurate preemption points.
886 *
 887 * It's all a bit involved since we cannot program an hrtimer while holding
 888 * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
889 * reschedule event.
890 *
891 * When we get rescheduled we reprogram the hrtick_timer outside of the
892 * rq->lock.
893 */
894static inline void resched_hrt(struct task_struct *p)
895{
896 __resched_task(p, TIF_HRTICK_RESCHED);
897}
898
899static inline void resched_rq(struct rq *rq)
900{
901 unsigned long flags;
902
903 spin_lock_irqsave(&rq->lock, flags);
904 resched_task(rq->curr);
905 spin_unlock_irqrestore(&rq->lock, flags);
906}
907
908enum {
 909 HRTICK_SET, /* re-program hrtick_timer */
910 HRTICK_RESET, /* not a new slice */
911};
912
913/*
914 * Use hrtick when:
915 * - enabled by features
916 * - hrtimer is actually high res
917 */
918static inline int hrtick_enabled(struct rq *rq)
919{
920 if (!sched_feat(HRTICK))
921 return 0;
922 return hrtimer_is_hres_active(&rq->hrtick_timer);
923}
924
925/*
926 * Called to set the hrtick timer state.
927 *
928 * called with rq->lock held and irqs disabled
929 */
930static void hrtick_start(struct rq *rq, u64 delay, int reset)
931{
932 assert_spin_locked(&rq->lock);
933
934 /*
935 * preempt at: now + delay
936 */
937 rq->hrtick_expire =
938 ktime_add_ns(rq->hrtick_timer.base->get_time(), delay);
939 /*
940 * indicate we need to program the timer
941 */
942 __set_bit(HRTICK_SET, &rq->hrtick_flags);
943 if (reset)
944 __set_bit(HRTICK_RESET, &rq->hrtick_flags);
945
946 /*
947 * New slices are called from the schedule path and don't need a
948 * forced reschedule.
949 */
950 if (reset)
951 resched_hrt(rq->curr);
952}
953
954static void hrtick_clear(struct rq *rq)
955{
956 if (hrtimer_active(&rq->hrtick_timer))
957 hrtimer_cancel(&rq->hrtick_timer);
958}
959
960/*
961 * Update the timer from the possible pending state.
962 */
963static void hrtick_set(struct rq *rq)
964{
965 ktime_t time;
966 int set, reset;
967 unsigned long flags;
968
969 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
970
971 spin_lock_irqsave(&rq->lock, flags);
972 set = __test_and_clear_bit(HRTICK_SET, &rq->hrtick_flags);
973 reset = __test_and_clear_bit(HRTICK_RESET, &rq->hrtick_flags);
974 time = rq->hrtick_expire;
975 clear_thread_flag(TIF_HRTICK_RESCHED);
976 spin_unlock_irqrestore(&rq->lock, flags);
977
978 if (set) {
979 hrtimer_start(&rq->hrtick_timer, time, HRTIMER_MODE_ABS);
980 if (reset && !hrtimer_active(&rq->hrtick_timer))
981 resched_rq(rq);
982 } else
983 hrtick_clear(rq);
984}
985
986/*
987 * High-resolution timer tick.
988 * Runs from hardirq context with interrupts disabled.
989 */
990static enum hrtimer_restart hrtick(struct hrtimer *timer)
991{
992 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
993
994 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
995
996 spin_lock(&rq->lock);
997 __update_rq_clock(rq);
998 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
999 spin_unlock(&rq->lock);
1000
1001 return HRTIMER_NORESTART;
1002}
1003
1004static inline void init_rq_hrtick(struct rq *rq)
1005{
1006 rq->hrtick_flags = 0;
1007 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1008 rq->hrtick_timer.function = hrtick;
1009 rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
1010}
1011
1012void hrtick_resched(void)
1013{
1014 struct rq *rq;
1015 unsigned long flags;
1016
1017 if (!test_thread_flag(TIF_HRTICK_RESCHED))
1018 return;
1019
1020 local_irq_save(flags);
1021 rq = cpu_rq(smp_processor_id());
1022 hrtick_set(rq);
1023 local_irq_restore(flags);
1024}
1025#else
1026static inline void hrtick_clear(struct rq *rq)
1027{
1028}
1029
1030static inline void hrtick_set(struct rq *rq)
1031{
1032}
1033
1034static inline void init_rq_hrtick(struct rq *rq)
1035{
1036}
1037
1038void hrtick_resched(void)
1039{
1040}
1041#endif
1042
Ingo Molnar1b9f19c2007-07-09 18:51:59 +02001043/*
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001044 * resched_task - mark a task 'to be rescheduled now'.
1045 *
1046 * On UP this means the setting of the need_resched flag, on SMP it
1047 * might also involve a cross-CPU call to trigger the scheduler on
1048 * the target CPU.
1049 */
1050#ifdef CONFIG_SMP
1051
1052#ifndef tsk_is_polling
1053#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1054#endif
1055
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001056static void __resched_task(struct task_struct *p, int tif_bit)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001057{
1058 int cpu;
1059
1060 assert_spin_locked(&task_rq(p)->lock);
1061
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001062 if (unlikely(test_tsk_thread_flag(p, tif_bit)))
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001063 return;
1064
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001065 set_tsk_thread_flag(p, tif_bit);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001066
1067 cpu = task_cpu(p);
1068 if (cpu == smp_processor_id())
1069 return;
1070
1071 /* NEED_RESCHED must be visible before we test polling */
1072 smp_mb();
1073 if (!tsk_is_polling(p))
1074 smp_send_reschedule(cpu);
1075}
1076
1077static void resched_cpu(int cpu)
1078{
1079 struct rq *rq = cpu_rq(cpu);
1080 unsigned long flags;
1081
1082 if (!spin_trylock_irqsave(&rq->lock, flags))
1083 return;
1084 resched_task(cpu_curr(cpu));
1085 spin_unlock_irqrestore(&rq->lock, flags);
1086}
1087#else
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001088static void __resched_task(struct task_struct *p, int tif_bit)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001089{
1090 assert_spin_locked(&task_rq(p)->lock);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001091 set_tsk_thread_flag(p, tif_bit);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001092}
1093#endif
1094
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001095#if BITS_PER_LONG == 32
1096# define WMULT_CONST (~0UL)
1097#else
1098# define WMULT_CONST (1UL << 32)
1099#endif
1100
1101#define WMULT_SHIFT 32
1102
Ingo Molnar194081e2007-08-09 11:16:51 +02001103/*
1104 * Shift right and round:
1105 */
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001106#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
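/*
 * Worked example for SRR() (illustrative): SRR(10, 2) adds half of the
 * divisor-to-be (1 << 1 == 2) before shifting, so (10 + 2) >> 2 == 3,
 * i.e. 10/4 == 2.5 rounded to nearest instead of truncated to 2.
 */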
Ingo Molnar194081e2007-08-09 11:16:51 +02001107
Ingo Molnarcb1c4fc2007-08-02 17:41:40 +02001108static unsigned long
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001109calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1110 struct load_weight *lw)
1111{
1112 u64 tmp;
1113
1114 if (unlikely(!lw->inv_weight))
Ingo Molnar194081e2007-08-09 11:16:51 +02001115 lw->inv_weight = (WMULT_CONST - lw->weight/2) / lw->weight + 1;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001116
1117 tmp = (u64)delta_exec * weight;
1118 /*
1119 * Check whether we'd overflow the 64-bit multiplication:
1120 */
Ingo Molnar194081e2007-08-09 11:16:51 +02001121 if (unlikely(tmp > WMULT_CONST))
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001122 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
Ingo Molnar194081e2007-08-09 11:16:51 +02001123 WMULT_SHIFT/2);
1124 else
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001125 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001126
Ingo Molnarecf691d2007-08-02 17:41:40 +02001127 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001128}
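/*
 * Worked example (illustrative, not part of the original file): with
 * delta_exec = 1000000 (1 ms), weight = NICE_0_LOAD (1024) and
 * lw->weight = 2048, the result is delta_exec * weight / lw->weight =
 * 500000 ns, i.e. the delta is scaled down because the queue's total
 * weight is twice the nice-0 weight.
 */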
1129
1130static inline unsigned long
1131calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
1132{
1133 return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
1134}
1135
Ingo Molnar10919852007-10-15 17:00:04 +02001136static inline void update_load_add(struct load_weight *lw, unsigned long inc)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001137{
1138 lw->weight += inc;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001139}
1140
Ingo Molnar10919852007-10-15 17:00:04 +02001141static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001142{
1143 lw->weight -= dec;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001144}
1145
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146/*
Peter Williams2dd73a42006-06-27 02:54:34 -07001147 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 1148 * of tasks with abnormal "nice" values across CPUs, the contribution that
1149 * each task makes to its run queue's load is weighted according to its
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01001150 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
Peter Williams2dd73a42006-06-27 02:54:34 -07001151 * scaled version of the new time slice allocation that they receive on time
1152 * slice expiry etc.
1153 */
1154
Ingo Molnardd41f592007-07-09 18:51:59 +02001155#define WEIGHT_IDLEPRIO 2
1156#define WMULT_IDLEPRIO (1 << 31)
1157
1158/*
1159 * Nice levels are multiplicative, with a gentle 10% change for every
1160 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1161 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1162 * that remained on nice 0.
1163 *
1164 * The "10% effect" is relative and cumulative: from _any_ nice level,
1165 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
Ingo Molnarf9153ee2007-07-16 09:46:30 +02001166 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1167 * If a task goes up by ~10% and another task goes down by ~10% then
1168 * the relative distance between them is ~25%.)
Ingo Molnardd41f592007-07-09 18:51:59 +02001169 */
1170static const int prio_to_weight[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001171 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1172 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1173 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1174 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1175 /* 0 */ 1024, 820, 655, 526, 423,
1176 /* 5 */ 335, 272, 215, 172, 137,
1177 /* 10 */ 110, 87, 70, 56, 45,
1178 /* 15 */ 36, 29, 23, 18, 15,
Ingo Molnardd41f592007-07-09 18:51:59 +02001179};
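/*
 * Worked example for the table above (illustrative): a nice-0 task has
 * weight 1024 and a nice-1 task weight 820 (a ~1.25 step).  Running the
 * two together, their CPU shares are 1024/1844 ~= 55.5% and
 * 820/1844 ~= 44.5%, so the nice-1 task ends up with roughly 10% less
 * CPU than the even 50%/50% split -- the "10% effect" described above.
 */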
1180
Ingo Molnar5714d2d2007-07-16 09:46:31 +02001181/*
1182 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1183 *
1184 * In cases where the weight does not change often, we can use the
1185 * precalculated inverse to speed up arithmetics by turning divisions
1186 * into multiplications:
1187 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001188static const u32 prio_to_wmult[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001189 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1190 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1191 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1192 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1193 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1194 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1195 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1196 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
Ingo Molnardd41f592007-07-09 18:51:59 +02001197};
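/*
 * Sanity check for the inverses above (illustrative): the nice-0 entry is
 * 2^32 / 1024 = 4194304, and the nice-19 entry 286331153 ~= 2^32 / 15,
 * matching prio_to_weight[] entry by entry.
 */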
Peter Williams2dd73a42006-06-27 02:54:34 -07001198
Ingo Molnardd41f592007-07-09 18:51:59 +02001199static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
1200
1201/*
1202 * runqueue iterator, to support SMP load-balancing between different
1203 * scheduling classes, without having to expose their internal data
1204 * structures to the load-balancing proper:
1205 */
1206struct rq_iterator {
1207 void *arg;
1208 struct task_struct *(*start)(void *);
1209 struct task_struct *(*next)(void *);
1210};
1211
Peter Williamse1d14842007-10-24 18:23:51 +02001212#ifdef CONFIG_SMP
1213static unsigned long
1214balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1215 unsigned long max_load_move, struct sched_domain *sd,
1216 enum cpu_idle_type idle, int *all_pinned,
1217 int *this_best_prio, struct rq_iterator *iterator);
1218
1219static int
1220iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
1221 struct sched_domain *sd, enum cpu_idle_type idle,
1222 struct rq_iterator *iterator);
Peter Williamse1d14842007-10-24 18:23:51 +02001223#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02001224
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001225#ifdef CONFIG_CGROUP_CPUACCT
1226static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
1227#else
1228static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
1229#endif
1230
Srivatsa Vaddagiri58e2d4c2008-01-25 21:08:00 +01001231static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1232{
1233 update_load_add(&rq->load, load);
1234}
1235
1236static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1237{
1238 update_load_sub(&rq->load, load);
1239}
1240
Gregory Haskinse7693a32008-01-25 21:08:09 +01001241#ifdef CONFIG_SMP
1242static unsigned long source_load(int cpu, int type);
1243static unsigned long target_load(int cpu, int type);
1244static unsigned long cpu_avg_load_per_task(int cpu);
1245static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
1246#endif /* CONFIG_SMP */
1247
Ingo Molnardd41f592007-07-09 18:51:59 +02001248#include "sched_stats.h"
Ingo Molnardd41f592007-07-09 18:51:59 +02001249#include "sched_idletask.c"
Ingo Molnar5522d5d2007-10-15 17:00:12 +02001250#include "sched_fair.c"
1251#include "sched_rt.c"
Ingo Molnardd41f592007-07-09 18:51:59 +02001252#ifdef CONFIG_SCHED_DEBUG
1253# include "sched_debug.c"
1254#endif
1255
1256#define sched_class_highest (&rt_sched_class)
1257
Ingo Molnare5fa2232007-08-09 11:16:49 +02001258static void inc_nr_running(struct task_struct *p, struct rq *rq)
Ingo Molnar9c217242007-08-02 17:41:40 +02001259{
1260 rq->nr_running++;
Ingo Molnar9c217242007-08-02 17:41:40 +02001261}
1262
Ingo Molnardb531812007-08-09 11:16:49 +02001263static void dec_nr_running(struct task_struct *p, struct rq *rq)
Ingo Molnar9c217242007-08-02 17:41:40 +02001264{
1265 rq->nr_running--;
Ingo Molnar9c217242007-08-02 17:41:40 +02001266}
1267
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001268static void set_load_weight(struct task_struct *p)
1269{
1270 if (task_has_rt_policy(p)) {
Ingo Molnardd41f592007-07-09 18:51:59 +02001271 p->se.load.weight = prio_to_weight[0] * 2;
1272 p->se.load.inv_weight = prio_to_wmult[0] >> 1;
1273 return;
1274 }
1275
1276 /*
1277 * SCHED_IDLE tasks get minimal weight:
1278 */
1279 if (p->policy == SCHED_IDLE) {
1280 p->se.load.weight = WEIGHT_IDLEPRIO;
1281 p->se.load.inv_weight = WMULT_IDLEPRIO;
1282 return;
1283 }
1284
1285 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1286 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001287}
1288
Ingo Molnar8159f872007-08-09 11:16:49 +02001289static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001290{
1291 sched_info_queued(p);
Ingo Molnarfd390f62007-08-09 11:16:48 +02001292 p->sched_class->enqueue_task(rq, p, wakeup);
Ingo Molnardd41f592007-07-09 18:51:59 +02001293 p->se.on_rq = 1;
1294}
1295
Ingo Molnar69be72c2007-08-09 11:16:49 +02001296static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
Ingo Molnardd41f592007-07-09 18:51:59 +02001297{
Ingo Molnarf02231e2007-08-09 11:16:48 +02001298 p->sched_class->dequeue_task(rq, p, sleep);
Ingo Molnardd41f592007-07-09 18:51:59 +02001299 p->se.on_rq = 0;
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001300}
1301
1302/*
Ingo Molnardd41f592007-07-09 18:51:59 +02001303 * __normal_prio - return the priority that is based on the static prio
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001304 */
Ingo Molnar14531182007-07-09 18:51:59 +02001305static inline int __normal_prio(struct task_struct *p)
1306{
Ingo Molnardd41f592007-07-09 18:51:59 +02001307 return p->static_prio;
Ingo Molnar14531182007-07-09 18:51:59 +02001308}
1309
1310/*
Ingo Molnarb29739f2006-06-27 02:54:51 -07001311 * Calculate the expected normal priority: i.e. priority
1312 * without taking RT-inheritance into account. Might be
1313 * boosted by interactivity modifiers. Changes upon fork,
1314 * setprio syscalls, and whenever the interactivity
1315 * estimator recalculates.
1316 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001317static inline int normal_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07001318{
1319 int prio;
1320
Ingo Molnare05606d2007-07-09 18:51:59 +02001321 if (task_has_rt_policy(p))
Ingo Molnarb29739f2006-06-27 02:54:51 -07001322 prio = MAX_RT_PRIO-1 - p->rt_priority;
1323 else
1324 prio = __normal_prio(p);
1325 return prio;
1326}
1327
1328/*
1329 * Calculate the current priority, i.e. the priority
1330 * taken into account by the scheduler. This value might
1331 * be boosted by RT tasks, or might be boosted by
1332 * interactivity modifiers. Will be RT if the task got
1333 * RT-boosted. If not then it returns p->normal_prio.
1334 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001335static int effective_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07001336{
1337 p->normal_prio = normal_prio(p);
1338 /*
1339 * If we are RT tasks or we were boosted to RT priority,
1340 * keep the priority unchanged. Otherwise, update priority
1341 * to the normal priority:
1342 */
1343 if (!rt_prio(p->prio))
1344 return p->normal_prio;
1345 return p->prio;
1346}
1347
1348/*
Ingo Molnardd41f592007-07-09 18:51:59 +02001349 * activate_task - move a task to the runqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001351static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352{
Ingo Molnardd41f592007-07-09 18:51:59 +02001353 if (p->state == TASK_UNINTERRUPTIBLE)
1354 rq->nr_uninterruptible--;
1355
Ingo Molnar8159f872007-08-09 11:16:49 +02001356 enqueue_task(rq, p, wakeup);
Ingo Molnare5fa2232007-08-09 11:16:49 +02001357 inc_nr_running(p, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358}
1359
1360/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 * deactivate_task - remove a task from the runqueue.
1362 */
Ingo Molnar2e1cb742007-08-09 11:16:49 +02001363static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364{
Ingo Molnardd41f592007-07-09 18:51:59 +02001365 if (p->state == TASK_UNINTERRUPTIBLE)
1366 rq->nr_uninterruptible++;
1367
Ingo Molnar69be72c2007-08-09 11:16:49 +02001368 dequeue_task(rq, p, sleep);
Ingo Molnardb531812007-08-09 11:16:49 +02001369 dec_nr_running(p, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370}
1371
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372/**
1373 * task_curr - is this task currently executing on a CPU?
1374 * @p: the task in question.
1375 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001376inline int task_curr(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377{
1378 return cpu_curr(task_cpu(p)) == p;
1379}
1380
Peter Williams2dd73a42006-06-27 02:54:34 -07001381/* Used instead of source_load when we know the type == 0 */
1382unsigned long weighted_cpuload(const int cpu)
1383{
Dmitry Adamushko495eca42007-10-15 17:00:06 +02001384 return cpu_rq(cpu)->load.weight;
Ingo Molnardd41f592007-07-09 18:51:59 +02001385}
1386
1387static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1388{
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001389 set_task_rq(p, cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02001390#ifdef CONFIG_SMP
Dmitry Adamushkoce96b5a2007-11-15 20:57:40 +01001391 /*
1392 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	1393	 * successfully executed on another CPU. We must ensure that updates of
1394 * per-task data have been completed by this moment.
1395 */
1396 smp_wmb();
Ingo Molnardd41f592007-07-09 18:51:59 +02001397 task_thread_info(p)->cpu = cpu;
Ingo Molnardd41f592007-07-09 18:51:59 +02001398#endif
Peter Williams2dd73a42006-06-27 02:54:34 -07001399}
1400
Steven Rostedtcb469842008-01-25 21:08:22 +01001401static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1402 const struct sched_class *prev_class,
1403 int oldprio, int running)
1404{
1405 if (prev_class != p->sched_class) {
1406 if (prev_class->switched_from)
1407 prev_class->switched_from(rq, p, running);
1408 p->sched_class->switched_to(rq, p, running);
1409 } else
1410 p->sched_class->prio_changed(rq, p, oldprio, running);
1411}
1412
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413#ifdef CONFIG_SMP
Ingo Molnarc65cc872007-07-09 18:51:58 +02001414
Ingo Molnarcc367732007-10-15 17:00:18 +02001415/*
1416 * Is this task likely cache-hot:
1417 */
Gregory Haskinse7693a32008-01-25 21:08:09 +01001418static int
Ingo Molnarcc367732007-10-15 17:00:18 +02001419task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
1420{
1421 s64 delta;
1422
1423 if (p->sched_class != &fair_sched_class)
1424 return 0;
1425
Ingo Molnar6bc16652007-10-15 17:00:18 +02001426 if (sysctl_sched_migration_cost == -1)
1427 return 1;
1428 if (sysctl_sched_migration_cost == 0)
1429 return 0;
1430
Ingo Molnarcc367732007-10-15 17:00:18 +02001431 delta = now - p->se.exec_start;
1432
1433 return delta < (s64)sysctl_sched_migration_cost;
1434}
1435
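/*
 * Illustrative sketch, not part of the original source: the cache-hot test
 * above as a standalone helper.  sysctl_sched_migration_cost is assumed to
 * default to roughly 0.5ms in this kernel; -1 means "treat every fair task
 * as hot", 0 means "never hot".  The helper name is made up.
 */
static inline int sketch_task_hot(u64 now, u64 exec_start, long long migration_cost)
{
	if (migration_cost == -1)
		return 1;	/* always hot: heat always blocks migration */
	if (migration_cost == 0)
		return 0;	/* never hot: heat never blocks migration */

	/* ran within the last migration_cost ns -> cache likely still warm */
	return (s64)(now - exec_start) < migration_cost;
}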
1436
Ingo Molnardd41f592007-07-09 18:51:59 +02001437void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
Ingo Molnarc65cc872007-07-09 18:51:58 +02001438{
Ingo Molnardd41f592007-07-09 18:51:59 +02001439 int old_cpu = task_cpu(p);
1440 struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
Srivatsa Vaddagiri2830cf82007-10-15 17:00:12 +02001441 struct cfs_rq *old_cfsrq = task_cfs_rq(p),
1442 *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
Ingo Molnarbbdba7c2007-10-15 17:00:06 +02001443 u64 clock_offset;
Ingo Molnardd41f592007-07-09 18:51:59 +02001444
1445 clock_offset = old_rq->clock - new_rq->clock;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02001446
1447#ifdef CONFIG_SCHEDSTATS
1448 if (p->se.wait_start)
1449 p->se.wait_start -= clock_offset;
Ingo Molnardd41f592007-07-09 18:51:59 +02001450 if (p->se.sleep_start)
1451 p->se.sleep_start -= clock_offset;
1452 if (p->se.block_start)
1453 p->se.block_start -= clock_offset;
Ingo Molnarcc367732007-10-15 17:00:18 +02001454 if (old_cpu != new_cpu) {
1455 schedstat_inc(p, se.nr_migrations);
1456 if (task_hot(p, old_rq->clock, NULL))
1457 schedstat_inc(p, se.nr_forced2_migrations);
1458 }
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02001459#endif
Srivatsa Vaddagiri2830cf82007-10-15 17:00:12 +02001460 p->se.vruntime -= old_cfsrq->min_vruntime -
1461 new_cfsrq->min_vruntime;
Ingo Molnardd41f592007-07-09 18:51:59 +02001462
1463 __set_task_cpu(p, new_cpu);
Ingo Molnarc65cc872007-07-09 18:51:58 +02001464}
1465
Ingo Molnar70b97a72006-07-03 00:25:42 -07001466struct migration_req {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468
Ingo Molnar36c8b582006-07-03 00:25:41 -07001469 struct task_struct *task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 int dest_cpu;
1471
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 struct completion done;
Ingo Molnar70b97a72006-07-03 00:25:42 -07001473};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474
1475/*
1476 * The task's runqueue lock must be held.
	1477	 * Returns true if you have to wait for the migration thread.
1478 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001479static int
Ingo Molnar70b97a72006-07-03 00:25:42 -07001480migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481{
Ingo Molnar70b97a72006-07-03 00:25:42 -07001482 struct rq *rq = task_rq(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
1484 /*
1485 * If the task is not on a runqueue (and not running), then
1486 * it is sufficient to simply update the task's cpu field.
1487 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001488 if (!p->se.on_rq && !task_running(rq, p)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 set_task_cpu(p, dest_cpu);
1490 return 0;
1491 }
1492
1493 init_completion(&req->done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 req->task = p;
1495 req->dest_cpu = dest_cpu;
1496 list_add(&req->list, &rq->migration_queue);
Ingo Molnar48f24c42006-07-03 00:25:40 -07001497
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 return 1;
1499}
1500
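/*
 * Illustrative sketch, not part of the original source: the caller-side
 * protocol for migrate_task().  It mirrors what sched_migrate_task() does
 * further down; the reference on the migration thread keeps it alive across
 * the unlock, and req.done is completed by that thread once the move is done.
 */
static void sketch_push_task_to(struct task_struct *p, int dest_cpu)
{
	struct migration_req req;
	struct task_struct *mt;
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	if (!migrate_task(p, dest_cpu, &req)) {
		/* task was not on a runqueue: its cpu was updated in place */
		task_rq_unlock(rq, &flags);
		return;
	}

	/* queued: kick the per-cpu migration thread and wait for completion */
	mt = rq->migration_thread;
	get_task_struct(mt);
	task_rq_unlock(rq, &flags);
	wake_up_process(mt);
	put_task_struct(mt);
	wait_for_completion(&req.done);
}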
1501/*
1502 * wait_task_inactive - wait for a thread to unschedule.
1503 *
1504 * The caller must ensure that the task *will* unschedule sometime soon,
1505 * else this function might spin for a *long* time. This function can't
1506 * be called with interrupts off, or it may introduce deadlock with
1507 * smp_call_function() if an IPI is sent by the same process we are
1508 * waiting to become inactive.
1509 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001510void wait_task_inactive(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511{
1512 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02001513 int running, on_rq;
Ingo Molnar70b97a72006-07-03 00:25:42 -07001514 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515
Andi Kleen3a5c3592007-10-15 17:00:14 +02001516 for (;;) {
1517 /*
1518 * We do the initial early heuristics without holding
1519 * any task-queue locks at all. We'll only try to get
1520 * the runqueue lock when things look like they will
1521 * work out!
1522 */
1523 rq = task_rq(p);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07001524
Andi Kleen3a5c3592007-10-15 17:00:14 +02001525 /*
1526 * If the task is actively running on another CPU
1527 * still, just relax and busy-wait without holding
1528 * any locks.
1529 *
1530 * NOTE! Since we don't hold any locks, it's not
	1531	 * even guaranteed that "rq" stays as the right runqueue!
1532 * But we don't care, since "task_running()" will
1533 * return false if the runqueue has changed and p
1534 * is actually now running somewhere else!
1535 */
1536 while (task_running(rq, p))
1537 cpu_relax();
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07001538
Andi Kleen3a5c3592007-10-15 17:00:14 +02001539 /*
1540 * Ok, time to look more closely! We need the rq
1541 * lock now, to be *sure*. If we're wrong, we'll
1542 * just go back and repeat.
1543 */
1544 rq = task_rq_lock(p, &flags);
1545 running = task_running(rq, p);
1546 on_rq = p->se.on_rq;
1547 task_rq_unlock(rq, &flags);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07001548
Andi Kleen3a5c3592007-10-15 17:00:14 +02001549 /*
1550 * Was it really running after all now that we
1551 * checked with the proper locks actually held?
1552 *
1553 * Oops. Go back and try again..
1554 */
1555 if (unlikely(running)) {
1556 cpu_relax();
1557 continue;
1558 }
1559
1560 /*
1561 * It's not enough that it's not actively running,
1562 * it must be off the runqueue _entirely_, and not
1563 * preempted!
1564 *
	1565	 * So if it was still runnable (but just not actively
1566 * running right now), it's preempted, and we should
1567 * yield - it could be a while.
1568 */
1569 if (unlikely(on_rq)) {
1570 schedule_timeout_uninterruptible(1);
1571 continue;
1572 }
1573
1574 /*
1575 * Ahh, all good. It wasn't running, and it wasn't
1576 * runnable, which means that it will never become
1577 * running in the future either. We're all done!
1578 */
1579 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581}
1582
1583/***
1584 * kick_process - kick a running thread to enter/exit the kernel
1585 * @p: the to-be-kicked thread
1586 *
1587 * Cause a process which is running on another CPU to enter
1588 * kernel-mode, without any delay. (to get signals handled.)
1589 *
	1590	 * NOTE: this function doesn't have to take the runqueue lock,
1591 * because all it wants to ensure is that the remote task enters
1592 * the kernel. If the IPI races and the task has been migrated
1593 * to another CPU then no harm is done and the purpose has been
1594 * achieved as well.
1595 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001596void kick_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597{
1598 int cpu;
1599
1600 preempt_disable();
1601 cpu = task_cpu(p);
1602 if ((cpu != smp_processor_id()) && task_curr(p))
1603 smp_send_reschedule(cpu);
1604 preempt_enable();
1605}
1606
1607/*
Peter Williams2dd73a42006-06-27 02:54:34 -07001608 * Return a low guess at the load of a migration-source cpu weighted
1609 * according to the scheduling class and "nice" value.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 *
1611 * We want to under-estimate the load of migration sources, to
1612 * balance conservatively.
1613 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02001614static unsigned long source_load(int cpu, int type)
Con Kolivasb9104722005-11-08 21:38:55 -08001615{
Ingo Molnar70b97a72006-07-03 00:25:42 -07001616 struct rq *rq = cpu_rq(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02001617 unsigned long total = weighted_cpuload(cpu);
Nick Piggina2000572006-02-10 01:51:02 -08001618
Peter Williams2dd73a42006-06-27 02:54:34 -07001619 if (type == 0)
Ingo Molnardd41f592007-07-09 18:51:59 +02001620 return total;
Peter Williams2dd73a42006-06-27 02:54:34 -07001621
Ingo Molnardd41f592007-07-09 18:51:59 +02001622 return min(rq->cpu_load[type-1], total);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623}
1624
1625/*
Peter Williams2dd73a42006-06-27 02:54:34 -07001626 * Return a high guess at the load of a migration-target cpu weighted
1627 * according to the scheduling class and "nice" value.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02001629static unsigned long target_load(int cpu, int type)
Con Kolivasb9104722005-11-08 21:38:55 -08001630{
Ingo Molnar70b97a72006-07-03 00:25:42 -07001631 struct rq *rq = cpu_rq(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02001632 unsigned long total = weighted_cpuload(cpu);
Nick Piggina2000572006-02-10 01:51:02 -08001633
Peter Williams2dd73a42006-06-27 02:54:34 -07001634 if (type == 0)
Ingo Molnardd41f592007-07-09 18:51:59 +02001635 return total;
Peter Williams2dd73a42006-06-27 02:54:34 -07001636
Ingo Molnardd41f592007-07-09 18:51:59 +02001637 return max(rq->cpu_load[type-1], total);
Peter Williams2dd73a42006-06-27 02:54:34 -07001638}
1639
1640/*
1641 * Return the average load per task on the cpu's run queue
1642 */
Gregory Haskinse7693a32008-01-25 21:08:09 +01001643static unsigned long cpu_avg_load_per_task(int cpu)
Peter Williams2dd73a42006-06-27 02:54:34 -07001644{
Ingo Molnar70b97a72006-07-03 00:25:42 -07001645 struct rq *rq = cpu_rq(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02001646 unsigned long total = weighted_cpuload(cpu);
Peter Williams2dd73a42006-06-27 02:54:34 -07001647 unsigned long n = rq->nr_running;
1648
Ingo Molnardd41f592007-07-09 18:51:59 +02001649 return n ? total / n : SCHED_LOAD_SCALE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650}
1651
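/*
 * Illustrative example, not part of the original source, of why source_load()
 * and target_load() differ.  Suppose a cpu's decayed history
 * rq->cpu_load[type-1] is 2048 while its instantaneous weighted_cpuload()
 * is 1024 (a task just left):
 *
 *	source_load() = min(2048, 1024) = 1024	(under-estimate: the source
 *						 looks less overloaded, pull less)
 *	target_load() = max(2048, 1024) = 2048	(over-estimate: the target
 *						 looks busier, push less)
 *
 * Both biases make the balancer conservative, so a transient dip or spike in
 * load does not make tasks bounce between cpus.
 */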
Nick Piggin147cbb42005-06-25 14:57:19 -07001652/*
1653 * find_idlest_group finds and returns the least busy CPU group within the
1654 * domain.
1655 */
1656static struct sched_group *
1657find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
1658{
1659 struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
1660 unsigned long min_load = ULONG_MAX, this_load = 0;
1661 int load_idx = sd->forkexec_idx;
1662 int imbalance = 100 + (sd->imbalance_pct-100)/2;
1663
1664 do {
1665 unsigned long load, avg_load;
1666 int local_group;
1667 int i;
1668
M.Baris Demirayda5a5522005-09-10 00:26:09 -07001669 /* Skip over this group if it has no CPUs allowed */
1670 if (!cpus_intersects(group->cpumask, p->cpus_allowed))
Andi Kleen3a5c3592007-10-15 17:00:14 +02001671 continue;
M.Baris Demirayda5a5522005-09-10 00:26:09 -07001672
Nick Piggin147cbb42005-06-25 14:57:19 -07001673 local_group = cpu_isset(this_cpu, group->cpumask);
Nick Piggin147cbb42005-06-25 14:57:19 -07001674
1675 /* Tally up the load of all CPUs in the group */
1676 avg_load = 0;
1677
1678 for_each_cpu_mask(i, group->cpumask) {
1679 /* Bias balancing toward cpus of our domain */
1680 if (local_group)
1681 load = source_load(i, load_idx);
1682 else
1683 load = target_load(i, load_idx);
1684
1685 avg_load += load;
1686 }
1687
1688 /* Adjust by relative CPU power of the group */
Eric Dumazet5517d862007-05-08 00:32:57 -07001689 avg_load = sg_div_cpu_power(group,
1690 avg_load * SCHED_LOAD_SCALE);
Nick Piggin147cbb42005-06-25 14:57:19 -07001691
1692 if (local_group) {
1693 this_load = avg_load;
1694 this = group;
1695 } else if (avg_load < min_load) {
1696 min_load = avg_load;
1697 idlest = group;
1698 }
Andi Kleen3a5c3592007-10-15 17:00:14 +02001699 } while (group = group->next, group != sd->groups);
Nick Piggin147cbb42005-06-25 14:57:19 -07001700
1701 if (!idlest || 100*this_load < imbalance*min_load)
1702 return NULL;
1703 return idlest;
1704}
1705
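/*
 * Illustrative example, not part of the original source, of the cpu-power
 * normalisation in find_idlest_group() above.  Assumes SCHED_LOAD_SCALE == 1024
 * and that each cpu contributes SCHED_LOAD_SCALE of group power by default:
 *
 *	group A: 2 cpus, summed load 3072, power 2048
 *		 avg_load = 3072 * 1024 / 2048 = 1536
 *	group B: 1 cpu,  summed load 2048, power 1024
 *		 avg_load = 2048 * 1024 / 1024 = 2048
 *
 * Without the division a larger (or more powerful) group would always look
 * busier simply because more cpu loads are summed into it, and group A would
 * wrongly be rejected here even though each of its cpus carries less load.
 */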
1706/*
Satoru Takeuchi0feaece2006-10-03 01:14:10 -07001707 * find_idlest_cpu - find the idlest cpu among the cpus in group.
Nick Piggin147cbb42005-06-25 14:57:19 -07001708 */
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07001709static int
1710find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
Nick Piggin147cbb42005-06-25 14:57:19 -07001711{
M.Baris Demirayda5a5522005-09-10 00:26:09 -07001712 cpumask_t tmp;
Nick Piggin147cbb42005-06-25 14:57:19 -07001713 unsigned long load, min_load = ULONG_MAX;
1714 int idlest = -1;
1715 int i;
1716
M.Baris Demirayda5a5522005-09-10 00:26:09 -07001717 /* Traverse only the allowed CPUs */
1718 cpus_and(tmp, group->cpumask, p->cpus_allowed);
1719
1720 for_each_cpu_mask(i, tmp) {
Peter Williams2dd73a42006-06-27 02:54:34 -07001721 load = weighted_cpuload(i);
Nick Piggin147cbb42005-06-25 14:57:19 -07001722
1723 if (load < min_load || (load == min_load && i == this_cpu)) {
1724 min_load = load;
1725 idlest = i;
1726 }
1727 }
1728
1729 return idlest;
1730}
1731
Nick Piggin476d1392005-06-25 14:57:29 -07001732/*
1733 * sched_balance_self: balance the current task (running on cpu) in domains
1734 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
1735 * SD_BALANCE_EXEC.
1736 *
	1737	 * Balance, i.e. select the least loaded group.
1738 *
1739 * Returns the target CPU number, or the same CPU if no balancing is needed.
1740 *
1741 * preempt must be disabled.
1742 */
1743static int sched_balance_self(int cpu, int flag)
1744{
1745 struct task_struct *t = current;
1746 struct sched_domain *tmp, *sd = NULL;
Nick Piggin147cbb42005-06-25 14:57:19 -07001747
Chen, Kenneth Wc96d1452006-06-27 02:54:28 -07001748 for_each_domain(cpu, tmp) {
Ingo Molnar9761eea2007-07-09 18:52:00 +02001749 /*
1750 * If power savings logic is enabled for a domain, stop there.
1751 */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07001752 if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1753 break;
Nick Piggin476d1392005-06-25 14:57:29 -07001754 if (tmp->flags & flag)
1755 sd = tmp;
Chen, Kenneth Wc96d1452006-06-27 02:54:28 -07001756 }
Nick Piggin476d1392005-06-25 14:57:29 -07001757
1758 while (sd) {
1759 cpumask_t span;
1760 struct sched_group *group;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07001761 int new_cpu, weight;
1762
1763 if (!(sd->flags & flag)) {
1764 sd = sd->child;
1765 continue;
1766 }
Nick Piggin476d1392005-06-25 14:57:29 -07001767
1768 span = sd->span;
1769 group = find_idlest_group(sd, t, cpu);
Siddha, Suresh B1a848872006-10-03 01:14:08 -07001770 if (!group) {
1771 sd = sd->child;
1772 continue;
1773 }
Nick Piggin476d1392005-06-25 14:57:29 -07001774
M.Baris Demirayda5a5522005-09-10 00:26:09 -07001775 new_cpu = find_idlest_cpu(group, t, cpu);
Siddha, Suresh B1a848872006-10-03 01:14:08 -07001776 if (new_cpu == -1 || new_cpu == cpu) {
1777 /* Now try balancing at a lower domain level of cpu */
1778 sd = sd->child;
1779 continue;
1780 }
Nick Piggin476d1392005-06-25 14:57:29 -07001781
Siddha, Suresh B1a848872006-10-03 01:14:08 -07001782 /* Now try balancing at a lower domain level of new_cpu */
Nick Piggin476d1392005-06-25 14:57:29 -07001783 cpu = new_cpu;
Nick Piggin476d1392005-06-25 14:57:29 -07001784 sd = NULL;
1785 weight = cpus_weight(span);
1786 for_each_domain(cpu, tmp) {
1787 if (weight <= cpus_weight(tmp->span))
1788 break;
1789 if (tmp->flags & flag)
1790 sd = tmp;
1791 }
1792 /* while loop will break here if sd == NULL */
1793 }
1794
1795 return cpu;
1796}
1797
1798#endif /* CONFIG_SMP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800/***
1801 * try_to_wake_up - wake up a thread
1802 * @p: the to-be-woken-up thread
1803 * @state: the mask of task states that can be woken
1804 * @sync: do a synchronous wakeup?
1805 *
1806 * Put it on the run-queue if it's not already there. The "current"
1807 * thread is always on the run-queue (except when the actual
1808 * re-schedule is in progress), and as such you're allowed to do
1809 * the simpler "current->state = TASK_RUNNING" to mark yourself
1810 * runnable without the overhead of this.
1811 *
1812 * returns failure only if the task is already active.
1813 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001814static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815{
Ingo Molnarcc367732007-10-15 17:00:18 +02001816 int cpu, orig_cpu, this_cpu, success = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 unsigned long flags;
1818 long old_state;
Ingo Molnar70b97a72006-07-03 00:25:42 -07001819 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
1821 rq = task_rq_lock(p, &flags);
1822 old_state = p->state;
1823 if (!(old_state & state))
1824 goto out;
1825
Ingo Molnardd41f592007-07-09 18:51:59 +02001826 if (p->se.on_rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 goto out_running;
1828
1829 cpu = task_cpu(p);
Ingo Molnarcc367732007-10-15 17:00:18 +02001830 orig_cpu = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 this_cpu = smp_processor_id();
1832
1833#ifdef CONFIG_SMP
1834 if (unlikely(task_running(rq, p)))
1835 goto out_activate;
1836
Dmitry Adamushko5d2f5a62008-01-25 21:08:21 +01001837 cpu = p->sched_class->select_task_rq(p, sync);
1838 if (cpu != orig_cpu) {
1839 set_task_cpu(p, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 task_rq_unlock(rq, &flags);
1841 /* might preempt at this point */
1842 rq = task_rq_lock(p, &flags);
1843 old_state = p->state;
1844 if (!(old_state & state))
1845 goto out;
Ingo Molnardd41f592007-07-09 18:51:59 +02001846 if (p->se.on_rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 goto out_running;
1848
1849 this_cpu = smp_processor_id();
1850 cpu = task_cpu(p);
1851 }
1852
Gregory Haskinse7693a32008-01-25 21:08:09 +01001853#ifdef CONFIG_SCHEDSTATS
1854 schedstat_inc(rq, ttwu_count);
1855 if (cpu == this_cpu)
1856 schedstat_inc(rq, ttwu_local);
1857 else {
1858 struct sched_domain *sd;
1859 for_each_domain(this_cpu, sd) {
1860 if (cpu_isset(cpu, sd->span)) {
1861 schedstat_inc(sd, ttwu_wake_remote);
1862 break;
1863 }
1864 }
1865 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01001866#endif
1867
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868out_activate:
1869#endif /* CONFIG_SMP */
Ingo Molnarcc367732007-10-15 17:00:18 +02001870 schedstat_inc(p, se.nr_wakeups);
1871 if (sync)
1872 schedstat_inc(p, se.nr_wakeups_sync);
1873 if (orig_cpu != cpu)
1874 schedstat_inc(p, se.nr_wakeups_migrate);
1875 if (cpu == this_cpu)
1876 schedstat_inc(p, se.nr_wakeups_local);
1877 else
1878 schedstat_inc(p, se.nr_wakeups_remote);
Ingo Molnar2daa3572007-08-09 11:16:51 +02001879 update_rq_clock(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02001880 activate_task(rq, p, 1);
Ingo Molnar9c63d9c2007-10-15 17:00:20 +02001881 check_preempt_curr(rq, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 success = 1;
1883
1884out_running:
1885 p->state = TASK_RUNNING;
Steven Rostedt9a897c52008-01-25 21:08:22 +01001886#ifdef CONFIG_SMP
1887 if (p->sched_class->task_wake_up)
1888 p->sched_class->task_wake_up(rq, p);
1889#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890out:
1891 task_rq_unlock(rq, &flags);
1892
1893 return success;
1894}
1895
Ingo Molnar36c8b582006-07-03 00:25:41 -07001896int fastcall wake_up_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897{
1898 return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
1899 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
1900}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901EXPORT_SYMBOL(wake_up_process);
1902
Ingo Molnar36c8b582006-07-03 00:25:41 -07001903int fastcall wake_up_state(struct task_struct *p, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904{
1905 return try_to_wake_up(p, state, 0);
1906}
1907
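/*
 * Illustrative sketch, not part of the original source: the usual pattern a
 * driver uses with wake_up_process().  kthread_create() leaves the new thread
 * sleeping until it is explicitly woken; the sketch_* names are made up, the
 * kthread and scheduler calls are the real APIs.
 */
static struct task_struct *sketch_worker;

static int sketch_worker_fn(void *unused)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();		/* sleep until woken */
		__set_current_state(TASK_RUNNING);
		/* ... do the actual work here ... */
	}
	return 0;
}

static int sketch_start_worker(void)
{
	sketch_worker = kthread_create(sketch_worker_fn, NULL, "sketchd");
	if (IS_ERR(sketch_worker))
		return PTR_ERR(sketch_worker);

	wake_up_process(sketch_worker);		/* ends up in try_to_wake_up() */
	return 0;
}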
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908/*
1909 * Perform scheduler related setup for a newly forked process p.
1910 * p is forked by current.
Ingo Molnardd41f592007-07-09 18:51:59 +02001911 *
1912 * __sched_fork() is basic setup used by init_idle() too:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001914static void __sched_fork(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915{
Ingo Molnardd41f592007-07-09 18:51:59 +02001916 p->se.exec_start = 0;
1917 p->se.sum_exec_runtime = 0;
Ingo Molnarf6cf8912007-08-28 12:53:24 +02001918 p->se.prev_sum_exec_runtime = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02001919
1920#ifdef CONFIG_SCHEDSTATS
1921 p->se.wait_start = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02001922 p->se.sum_sleep_runtime = 0;
1923 p->se.sleep_start = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02001924 p->se.block_start = 0;
1925 p->se.sleep_max = 0;
1926 p->se.block_max = 0;
1927 p->se.exec_max = 0;
Ingo Molnareba1ed42007-10-15 17:00:02 +02001928 p->se.slice_max = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02001929 p->se.wait_max = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02001930#endif
Nick Piggin476d1392005-06-25 14:57:29 -07001931
Peter Zijlstrafa717062008-01-25 21:08:27 +01001932 INIT_LIST_HEAD(&p->rt.run_list);
Ingo Molnardd41f592007-07-09 18:51:59 +02001933 p->se.on_rq = 0;
Nick Piggin476d1392005-06-25 14:57:29 -07001934
Avi Kivitye107be32007-07-26 13:40:43 +02001935#ifdef CONFIG_PREEMPT_NOTIFIERS
1936 INIT_HLIST_HEAD(&p->preempt_notifiers);
1937#endif
1938
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 /*
1940 * We mark the process as running here, but have not actually
1941 * inserted it onto the runqueue yet. This guarantees that
1942 * nobody will actually run it, and a signal or other external
1943 * event cannot wake it up and insert it on the runqueue either.
1944 */
1945 p->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02001946}
1947
1948/*
1949 * fork()/clone()-time setup:
1950 */
1951void sched_fork(struct task_struct *p, int clone_flags)
1952{
1953 int cpu = get_cpu();
1954
1955 __sched_fork(p);
1956
1957#ifdef CONFIG_SMP
1958 cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
1959#endif
Ingo Molnar02e4bac2007-10-15 17:00:11 +02001960 set_task_cpu(p, cpu);
Ingo Molnarb29739f2006-06-27 02:54:51 -07001961
1962 /*
1963 * Make sure we do not leak PI boosting priority to the child:
1964 */
1965 p->prio = current->normal_prio;
Hiroshi Shimamoto2ddbf952007-10-15 17:00:11 +02001966 if (!rt_prio(p->prio))
1967 p->sched_class = &fair_sched_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07001968
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07001969#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
Ingo Molnardd41f592007-07-09 18:51:59 +02001970 if (likely(sched_info_on()))
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07001971 memset(&p->sched_info, 0, sizeof(p->sched_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972#endif
Chen, Kenneth Wd6077cb2006-02-14 13:53:10 -08001973#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
Nick Piggin4866cde2005-06-25 14:57:23 -07001974 p->oncpu = 0;
1975#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976#ifdef CONFIG_PREEMPT
Nick Piggin4866cde2005-06-25 14:57:23 -07001977 /* Want to start with kernel preemption disabled. */
Al Viroa1261f52005-11-13 16:06:55 -08001978 task_thread_info(p)->preempt_count = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979#endif
Nick Piggin476d1392005-06-25 14:57:29 -07001980 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981}
1982
1983/*
1984 * wake_up_new_task - wake up a newly created task for the first time.
1985 *
1986 * This function will do some initial scheduler statistics housekeeping
1987 * that must be done for every newly created context, then puts the task
1988 * on the runqueue and wakes it.
1989 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001990void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991{
1992 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02001993 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994
1995 rq = task_rq_lock(p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 BUG_ON(p->state != TASK_RUNNING);
Ingo Molnara8e504d2007-08-09 11:16:47 +02001997 update_rq_clock(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998
1999 p->prio = effective_prio(p);
2000
Srivatsa Vaddagirib9dca1e2007-10-17 16:55:11 +02002001 if (!p->sched_class->task_new || !current->se.on_rq) {
Ingo Molnardd41f592007-07-09 18:51:59 +02002002 activate_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 /*
Ingo Molnardd41f592007-07-09 18:51:59 +02002005 * Let the scheduling class do new task startup
2006 * management (if any):
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 */
Ingo Molnaree0827d2007-08-09 11:16:49 +02002008 p->sched_class->task_new(rq, p);
Ingo Molnare5fa2232007-08-09 11:16:49 +02002009 inc_nr_running(p, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 }
Ingo Molnardd41f592007-07-09 18:51:59 +02002011 check_preempt_curr(rq, p);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002012#ifdef CONFIG_SMP
2013 if (p->sched_class->task_wake_up)
2014 p->sched_class->task_wake_up(rq, p);
2015#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002016 task_rq_unlock(rq, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017}
2018
Avi Kivitye107be32007-07-26 13:40:43 +02002019#ifdef CONFIG_PREEMPT_NOTIFIERS
2020
2021/**
Randy Dunlap421cee22007-07-31 00:37:50 -07002022 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2023 * @notifier: notifier struct to register
Avi Kivitye107be32007-07-26 13:40:43 +02002024 */
2025void preempt_notifier_register(struct preempt_notifier *notifier)
2026{
2027 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2028}
2029EXPORT_SYMBOL_GPL(preempt_notifier_register);
2030
2031/**
2032 * preempt_notifier_unregister - no longer interested in preemption notifications
Randy Dunlap421cee22007-07-31 00:37:50 -07002033 * @notifier: notifier struct to unregister
Avi Kivitye107be32007-07-26 13:40:43 +02002034 *
2035 * This is safe to call from within a preemption notifier.
2036 */
2037void preempt_notifier_unregister(struct preempt_notifier *notifier)
2038{
2039 hlist_del(&notifier->link);
2040}
2041EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2042
2043static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2044{
2045 struct preempt_notifier *notifier;
2046 struct hlist_node *node;
2047
2048 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2049 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2050}
2051
2052static void
2053fire_sched_out_preempt_notifiers(struct task_struct *curr,
2054 struct task_struct *next)
2055{
2056 struct preempt_notifier *notifier;
2057 struct hlist_node *node;
2058
2059 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2060 notifier->ops->sched_out(notifier, next);
2061}
2062
2063#else
2064
2065static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2066{
2067}
2068
2069static void
2070fire_sched_out_preempt_notifiers(struct task_struct *curr,
2071 struct task_struct *next)
2072{
2073}
2074
2075#endif
2076
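/*
 * Illustrative sketch, not part of the original source: how a user of the
 * preempt notifier API above (e.g. a hypervisor that must save per-cpu state
 * when its vcpu thread is scheduled out) might hook in.  The ops layout
 * matches the callbacks invoked above; the sketch_* names are made up, and
 * preempt_notifier_init() is assumed to be the <linux/preempt.h> helper that
 * fills in the ops pointer.
 */
static void sketch_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current is about to run again on 'cpu': reload per-cpu state */
}

static void sketch_sched_out(struct preempt_notifier *pn,
			     struct task_struct *next)
{
	/* current is being preempted in favour of 'next': save state */
}

static struct preempt_notifier_ops sketch_preempt_ops = {
	.sched_in	= sketch_sched_in,
	.sched_out	= sketch_sched_out,
};

static void sketch_hook_current(struct preempt_notifier *pn)
{
	preempt_notifier_init(pn, &sketch_preempt_ops);
	preempt_notifier_register(pn);	/* adds pn to current's hlist */
}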
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077/**
Nick Piggin4866cde2005-06-25 14:57:23 -07002078 * prepare_task_switch - prepare to switch tasks
2079 * @rq: the runqueue preparing to switch
Randy Dunlap421cee22007-07-31 00:37:50 -07002080 * @prev: the current task that is being switched out
Nick Piggin4866cde2005-06-25 14:57:23 -07002081 * @next: the task we are going to switch to.
2082 *
2083 * This is called with the rq lock held and interrupts off. It must
2084 * be paired with a subsequent finish_task_switch after the context
2085 * switch.
2086 *
2087 * prepare_task_switch sets up locking and calls architecture specific
2088 * hooks.
2089 */
Avi Kivitye107be32007-07-26 13:40:43 +02002090static inline void
2091prepare_task_switch(struct rq *rq, struct task_struct *prev,
2092 struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -07002093{
Avi Kivitye107be32007-07-26 13:40:43 +02002094 fire_sched_out_preempt_notifiers(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07002095 prepare_lock_switch(rq, next);
2096 prepare_arch_switch(next);
2097}
2098
2099/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 * finish_task_switch - clean up after a task-switch
Jeff Garzik344baba2005-09-07 01:15:17 -04002101 * @rq: runqueue associated with task-switch
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 * @prev: the thread we just switched away from.
2103 *
Nick Piggin4866cde2005-06-25 14:57:23 -07002104 * finish_task_switch must be called after the context switch, paired
2105 * with a prepare_task_switch call before the context switch.
2106 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2107 * and do any other architecture-specific cleanup actions.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 *
2109 * Note that we may have delayed dropping an mm in context_switch(). If
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01002110 * so, we finish that here outside of the runqueue lock. (Doing it
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 * with the lock held can cause deadlocks; see schedule() for
2112 * details.)
2113 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02002114static void finish_task_switch(struct rq *rq, struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 __releases(rq->lock)
2116{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 struct mm_struct *mm = rq->prev_mm;
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002118 long prev_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119
2120 rq->prev_mm = NULL;
2121
2122 /*
	2123	 * A task struct has one reference for its use as "current".
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002124 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002125 * schedule one last time. The schedule call will never return, and
2126 * the scheduled task must drop that reference.
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002127 * The test for TASK_DEAD must occur while the runqueue locks are
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 * still held, otherwise prev could be scheduled on another cpu, die
2129 * there before we look at prev->state, and then the reference would
2130 * be dropped twice.
2131 * Manfred Spraul <manfred@colorfullife.com>
2132 */
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002133 prev_state = prev->state;
Nick Piggin4866cde2005-06-25 14:57:23 -07002134 finish_arch_switch(prev);
2135 finish_lock_switch(rq, prev);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002136#ifdef CONFIG_SMP
2137 if (current->sched_class->post_schedule)
2138 current->sched_class->post_schedule(rq);
2139#endif
Steven Rostedte8fa1362008-01-25 21:08:05 +01002140
Avi Kivitye107be32007-07-26 13:40:43 +02002141 fire_sched_in_preempt_notifiers(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 if (mm)
2143 mmdrop(mm);
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002144 if (unlikely(prev_state == TASK_DEAD)) {
bibo maoc6fd91f2006-03-26 01:38:20 -08002145 /*
2146 * Remove function-return probe instances associated with this
2147 * task and put them back on the free list.
Ingo Molnar9761eea2007-07-09 18:52:00 +02002148 */
bibo maoc6fd91f2006-03-26 01:38:20 -08002149 kprobe_flush_task(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 put_task_struct(prev);
bibo maoc6fd91f2006-03-26 01:38:20 -08002151 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152}
2153
2154/**
2155 * schedule_tail - first thing a freshly forked thread must call.
2156 * @prev: the thread we just switched away from.
2157 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002158asmlinkage void schedule_tail(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 __releases(rq->lock)
2160{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002161 struct rq *rq = this_rq();
2162
Nick Piggin4866cde2005-06-25 14:57:23 -07002163 finish_task_switch(rq, prev);
2164#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2165 /* In this case, finish_task_switch does not reenable preemption */
2166 preempt_enable();
2167#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 if (current->set_child_tid)
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002169 put_user(task_pid_vnr(current), current->set_child_tid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170}
2171
2172/*
2173 * context_switch - switch to the new MM and the new
2174 * thread's register state.
2175 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002176static inline void
Ingo Molnar70b97a72006-07-03 00:25:42 -07002177context_switch(struct rq *rq, struct task_struct *prev,
Ingo Molnar36c8b582006-07-03 00:25:41 -07002178 struct task_struct *next)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179{
Ingo Molnardd41f592007-07-09 18:51:59 +02002180 struct mm_struct *mm, *oldmm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181
Avi Kivitye107be32007-07-26 13:40:43 +02002182 prepare_task_switch(rq, prev, next);
Ingo Molnardd41f592007-07-09 18:51:59 +02002183 mm = next->mm;
2184 oldmm = prev->active_mm;
Zachary Amsden9226d122007-02-13 13:26:21 +01002185 /*
2186 * For paravirt, this is coupled with an exit in switch_to to
2187 * combine the page table reload and the switch backend into
2188 * one hypercall.
2189 */
2190 arch_enter_lazy_cpu_mode();
2191
Ingo Molnardd41f592007-07-09 18:51:59 +02002192 if (unlikely(!mm)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 next->active_mm = oldmm;
2194 atomic_inc(&oldmm->mm_count);
2195 enter_lazy_tlb(oldmm, next);
2196 } else
2197 switch_mm(oldmm, mm, next);
2198
Ingo Molnardd41f592007-07-09 18:51:59 +02002199 if (unlikely(!prev->mm)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 prev->active_mm = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 rq->prev_mm = oldmm;
2202 }
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07002203 /*
	2204	 * The runqueue lock will be released by the next
2205 * task (which is an invalid locking op but in the case
2206 * of the scheduler it's an obvious special-case), so we
2207 * do an early lockdep release here:
2208 */
2209#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07002210 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07002211#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
2213 /* Here we just switch the register state and the stack. */
2214 switch_to(prev, next, prev);
2215
Ingo Molnardd41f592007-07-09 18:51:59 +02002216 barrier();
2217 /*
2218 * this_rq must be evaluated again because prev may have moved
2219 * CPUs since it called schedule(), thus the 'rq' on its stack
2220 * frame will be invalid.
2221 */
2222 finish_task_switch(this_rq(), prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223}
2224
2225/*
2226 * nr_running, nr_uninterruptible and nr_context_switches:
2227 *
2228 * externally visible scheduler statistics: current number of runnable
2229 * threads, current number of uninterruptible-sleeping threads, total
2230 * number of context switches performed since bootup.
2231 */
2232unsigned long nr_running(void)
2233{
2234 unsigned long i, sum = 0;
2235
2236 for_each_online_cpu(i)
2237 sum += cpu_rq(i)->nr_running;
2238
2239 return sum;
2240}
2241
2242unsigned long nr_uninterruptible(void)
2243{
2244 unsigned long i, sum = 0;
2245
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002246 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 sum += cpu_rq(i)->nr_uninterruptible;
2248
2249 /*
2250 * Since we read the counters lockless, it might be slightly
2251 * inaccurate. Do not allow it to go below zero though:
2252 */
2253 if (unlikely((long)sum < 0))
2254 sum = 0;
2255
2256 return sum;
2257}
2258
2259unsigned long long nr_context_switches(void)
2260{
Steven Rostedtcc94abf2006-06-27 02:54:31 -07002261 int i;
2262 unsigned long long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002264 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 sum += cpu_rq(i)->nr_switches;
2266
2267 return sum;
2268}
2269
2270unsigned long nr_iowait(void)
2271{
2272 unsigned long i, sum = 0;
2273
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002274 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 sum += atomic_read(&cpu_rq(i)->nr_iowait);
2276
2277 return sum;
2278}
2279
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08002280unsigned long nr_active(void)
2281{
2282 unsigned long i, running = 0, uninterruptible = 0;
2283
2284 for_each_online_cpu(i) {
2285 running += cpu_rq(i)->nr_running;
2286 uninterruptible += cpu_rq(i)->nr_uninterruptible;
2287 }
2288
2289 if (unlikely((long)uninterruptible < 0))
2290 uninterruptible = 0;
2291
2292 return running + uninterruptible;
2293}
2294
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295/*
Ingo Molnardd41f592007-07-09 18:51:59 +02002296 * Update rq->cpu_load[] statistics. This function is usually called every
2297 * scheduler tick (TICK_NSEC).
Ingo Molnar48f24c42006-07-03 00:25:40 -07002298 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002299static void update_cpu_load(struct rq *this_rq)
Ingo Molnar48f24c42006-07-03 00:25:40 -07002300{
Dmitry Adamushko495eca42007-10-15 17:00:06 +02002301 unsigned long this_load = this_rq->load.weight;
Ingo Molnardd41f592007-07-09 18:51:59 +02002302 int i, scale;
2303
2304 this_rq->nr_load_updates++;
Ingo Molnardd41f592007-07-09 18:51:59 +02002305
2306 /* Update our load: */
2307 for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
2308 unsigned long old_load, new_load;
2309
2310 /* scale is effectively 1 << i now, and >> i divides by scale */
2311
2312 old_load = this_rq->cpu_load[i];
2313 new_load = this_load;
Ingo Molnara25707f2007-10-15 17:00:03 +02002314 /*
2315 * Round up the averaging division if load is increasing. This
2316 * prevents us from getting stuck on 9 if the load is 10, for
2317 * example.
2318 */
2319 if (new_load > old_load)
2320 new_load += scale-1;
Ingo Molnardd41f592007-07-09 18:51:59 +02002321 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
2322 }
Ingo Molnar48f24c42006-07-03 00:25:40 -07002323}
2324
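/*
 * Illustrative sketch, not part of the original source: the per-index decay
 * step of update_cpu_load() above, pulled out as a standalone helper with
 * example numbers.  Index 0 tracks the instantaneous load; higher indexes
 * react progressively more slowly.
 */
static inline unsigned long sketch_decay_cpu_load(unsigned long old_load,
						  unsigned long new_load, int i)
{
	unsigned long scale = 1UL << i;		/* 1, 2, 4, 8, ... */

	/* round up while rising so e.g. 9 can still converge up to 10 */
	if (new_load > old_load)
		new_load += scale - 1;

	/* i == 0 yields new_load; larger i keeps mostly the old value */
	return (old_load * (scale - 1) + new_load) >> i;
}

/*
 * e.g. one tick after a 1024-weight task appears on an idle cpu
 * (old_load == 0, new_load == 1024):
 *	i = 0 -> 1024,  i = 1 -> 512,  i = 2 -> 256,  i = 3 -> 128
 */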
Ingo Molnardd41f592007-07-09 18:51:59 +02002325#ifdef CONFIG_SMP
2326
Ingo Molnar48f24c42006-07-03 00:25:40 -07002327/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 * double_rq_lock - safely lock two runqueues
2329 *
	2330	 * Note this does not disable interrupts like task_rq_lock;
2331 * you need to do so manually before calling.
2332 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07002333static void double_rq_lock(struct rq *rq1, struct rq *rq2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 __acquires(rq1->lock)
2335 __acquires(rq2->lock)
2336{
Kirill Korotaev054b9102006-12-10 02:20:11 -08002337 BUG_ON(!irqs_disabled());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 if (rq1 == rq2) {
2339 spin_lock(&rq1->lock);
2340 __acquire(rq2->lock); /* Fake it out ;) */
2341 } else {
Chen, Kenneth Wc96d1452006-06-27 02:54:28 -07002342 if (rq1 < rq2) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 spin_lock(&rq1->lock);
2344 spin_lock(&rq2->lock);
2345 } else {
2346 spin_lock(&rq2->lock);
2347 spin_lock(&rq1->lock);
2348 }
2349 }
Ingo Molnar6e82a3b2007-08-09 11:16:51 +02002350 update_rq_clock(rq1);
2351 update_rq_clock(rq2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352}
2353
2354/*
2355 * double_rq_unlock - safely unlock two runqueues
2356 *
	2357	 * Note this does not restore interrupts like task_rq_unlock;
2358 * you need to do so manually after calling.
2359 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07002360static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 __releases(rq1->lock)
2362 __releases(rq2->lock)
2363{
2364 spin_unlock(&rq1->lock);
2365 if (rq1 != rq2)
2366 spin_unlock(&rq2->lock);
2367 else
2368 __release(rq2->lock);
2369}
2370
2371/*
2372 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
2373 */
Steven Rostedte8fa1362008-01-25 21:08:05 +01002374static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 __releases(this_rq->lock)
2376 __acquires(busiest->lock)
2377 __acquires(this_rq->lock)
2378{
Steven Rostedte8fa1362008-01-25 21:08:05 +01002379 int ret = 0;
2380
Kirill Korotaev054b9102006-12-10 02:20:11 -08002381 if (unlikely(!irqs_disabled())) {
	2382		/* printk() doesn't work well under rq->lock */
2383 spin_unlock(&this_rq->lock);
2384 BUG_ON(1);
2385 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386 if (unlikely(!spin_trylock(&busiest->lock))) {
Chen, Kenneth Wc96d1452006-06-27 02:54:28 -07002387 if (busiest < this_rq) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 spin_unlock(&this_rq->lock);
2389 spin_lock(&busiest->lock);
2390 spin_lock(&this_rq->lock);
Steven Rostedte8fa1362008-01-25 21:08:05 +01002391 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392 } else
2393 spin_lock(&busiest->lock);
2394 }
Steven Rostedte8fa1362008-01-25 21:08:05 +01002395 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396}
2397
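/*
 * Illustrative sketch, not part of the original source: the address-based
 * lock ordering used by double_rq_lock() and double_lock_balance() above,
 * reduced to two arbitrary spinlocks.  As long as every path takes the
 * lower-addressed lock first, two cpus that each hold one runqueue lock and
 * want the other's cannot deadlock in an AB-BA pattern.
 */
static void sketch_lock_two(spinlock_t *a, spinlock_t *b)
{
	if (a == b) {
		spin_lock(a);
		return;
	}
	if (a < b) {
		spin_lock(a);
		spin_lock(b);
	} else {
		spin_lock(b);
		spin_lock(a);
	}
}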
2398/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399 * If dest_cpu is allowed for this process, migrate the task to it.
	2400	 * This is accomplished by forcing the cpus_allowed mask to only
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01002401	 * allow dest_cpu, which will force the task onto dest_cpu. Then
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402	 * the cpus_allowed mask is restored.
2403 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002404static void sched_migrate_task(struct task_struct *p, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002406 struct migration_req req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002408 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409
2410 rq = task_rq_lock(p, &flags);
2411 if (!cpu_isset(dest_cpu, p->cpus_allowed)
2412 || unlikely(cpu_is_offline(dest_cpu)))
2413 goto out;
2414
2415 /* force the process onto the specified CPU */
2416 if (migrate_task(p, dest_cpu, &req)) {
2417 /* Need to wait for migration thread (might exit: take ref). */
2418 struct task_struct *mt = rq->migration_thread;
Ingo Molnar36c8b582006-07-03 00:25:41 -07002419
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 get_task_struct(mt);
2421 task_rq_unlock(rq, &flags);
2422 wake_up_process(mt);
2423 put_task_struct(mt);
2424 wait_for_completion(&req.done);
Ingo Molnar36c8b582006-07-03 00:25:41 -07002425
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426 return;
2427 }
2428out:
2429 task_rq_unlock(rq, &flags);
2430}
2431
2432/*
Nick Piggin476d1392005-06-25 14:57:29 -07002433 * sched_exec - execve() is a valuable balancing opportunity, because at
2434 * this point the task has the smallest effective memory and cache footprint.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 */
2436void sched_exec(void)
2437{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 int new_cpu, this_cpu = get_cpu();
Nick Piggin476d1392005-06-25 14:57:29 -07002439 new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 put_cpu();
Nick Piggin476d1392005-06-25 14:57:29 -07002441 if (new_cpu != this_cpu)
2442 sched_migrate_task(current, new_cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443}
2444
2445/*
2446 * pull_task - move a task from a remote runqueue to the local runqueue.
2447 * Both runqueues must be locked.
2448 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002449static void pull_task(struct rq *src_rq, struct task_struct *p,
2450 struct rq *this_rq, int this_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451{
Ingo Molnar2e1cb742007-08-09 11:16:49 +02002452 deactivate_task(src_rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 set_task_cpu(p, this_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02002454 activate_task(this_rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 /*
	2456	 * Note that idle threads have a prio of MAX_PRIO, so this test
	2457	 * is always true for them.
2458 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002459 check_preempt_curr(this_rq, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460}
2461
2462/*
2463 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
2464 */
Arjan van de Ven858119e2006-01-14 13:20:43 -08002465static
Ingo Molnar70b97a72006-07-03 00:25:42 -07002466int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
Ingo Molnard15bcfd2007-07-09 18:51:57 +02002467 struct sched_domain *sd, enum cpu_idle_type idle,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07002468 int *all_pinned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469{
2470 /*
2471 * We do not migrate tasks that are:
2472 * 1) running (obviously), or
2473 * 2) cannot be migrated to this CPU due to cpus_allowed, or
2474 * 3) are cache-hot on their current CPU.
2475 */
Ingo Molnarcc367732007-10-15 17:00:18 +02002476 if (!cpu_isset(this_cpu, p->cpus_allowed)) {
2477 schedstat_inc(p, se.nr_failed_migrations_affine);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 return 0;
Ingo Molnarcc367732007-10-15 17:00:18 +02002479 }
Nick Piggin81026792005-06-25 14:57:07 -07002480 *all_pinned = 0;
2481
Ingo Molnarcc367732007-10-15 17:00:18 +02002482 if (task_running(rq, p)) {
2483 schedstat_inc(p, se.nr_failed_migrations_running);
Nick Piggin81026792005-06-25 14:57:07 -07002484 return 0;
Ingo Molnarcc367732007-10-15 17:00:18 +02002485 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486
Ingo Molnarda84d962007-10-15 17:00:18 +02002487 /*
2488 * Aggressive migration if:
2489 * 1) task is cache cold, or
2490 * 2) too many balance attempts have failed.
2491 */
2492
Ingo Molnar6bc16652007-10-15 17:00:18 +02002493 if (!task_hot(p, rq->clock, sd) ||
2494 sd->nr_balance_failed > sd->cache_nice_tries) {
Ingo Molnarda84d962007-10-15 17:00:18 +02002495#ifdef CONFIG_SCHEDSTATS
Ingo Molnarcc367732007-10-15 17:00:18 +02002496 if (task_hot(p, rq->clock, sd)) {
Ingo Molnarda84d962007-10-15 17:00:18 +02002497 schedstat_inc(sd, lb_hot_gained[idle]);
Ingo Molnarcc367732007-10-15 17:00:18 +02002498 schedstat_inc(p, se.nr_forced_migrations);
2499 }
Ingo Molnarda84d962007-10-15 17:00:18 +02002500#endif
2501 return 1;
2502 }
2503
Ingo Molnarcc367732007-10-15 17:00:18 +02002504 if (task_hot(p, rq->clock, sd)) {
2505 schedstat_inc(p, se.nr_failed_migrations_hot);
Ingo Molnarda84d962007-10-15 17:00:18 +02002506 return 0;
Ingo Molnarcc367732007-10-15 17:00:18 +02002507 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 return 1;
2509}
2510
Peter Williamse1d14842007-10-24 18:23:51 +02002511static unsigned long
2512balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2513 unsigned long max_load_move, struct sched_domain *sd,
2514 enum cpu_idle_type idle, int *all_pinned,
2515 int *this_best_prio, struct rq_iterator *iterator)
Ingo Molnardd41f592007-07-09 18:51:59 +02002516{
Peter Zijlstrab82d9fd2007-11-09 22:39:39 +01002517 int loops = 0, pulled = 0, pinned = 0, skip_for_load;
Ingo Molnardd41f592007-07-09 18:51:59 +02002518 struct task_struct *p;
2519 long rem_load_move = max_load_move;
2520
Peter Williamse1d14842007-10-24 18:23:51 +02002521 if (max_load_move == 0)
Ingo Molnardd41f592007-07-09 18:51:59 +02002522 goto out;
2523
2524 pinned = 1;
2525
2526 /*
2527 * Start the load-balancing iterator:
2528 */
2529 p = iterator->start(iterator->arg);
2530next:
Peter Zijlstrab82d9fd2007-11-09 22:39:39 +01002531 if (!p || loops++ > sysctl_sched_nr_migrate)
Ingo Molnardd41f592007-07-09 18:51:59 +02002532 goto out;
2533 /*
Peter Zijlstrab82d9fd2007-11-09 22:39:39 +01002534	 * To help distribute high-priority tasks across CPUs, we don't
Ingo Molnardd41f592007-07-09 18:51:59 +02002535	 * skip a task if it will be the highest-priority task (i.e. smallest
	2536	 * prio value) on its new queue, regardless of its load weight.
2537 */
2538 skip_for_load = (p->se.load.weight >> 1) > rem_load_move +
2539 SCHED_LOAD_SCALE_FUZZ;
Peter Williamsa4ac01c2007-08-09 11:16:46 +02002540 if ((skip_for_load && p->prio >= *this_best_prio) ||
Ingo Molnardd41f592007-07-09 18:51:59 +02002541 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
Ingo Molnardd41f592007-07-09 18:51:59 +02002542 p = iterator->next(iterator->arg);
2543 goto next;
2544 }
2545
2546 pull_task(busiest, p, this_rq, this_cpu);
2547 pulled++;
2548 rem_load_move -= p->se.load.weight;
2549
2550 /*
Peter Zijlstrab82d9fd2007-11-09 22:39:39 +01002551 * We only want to steal up to the prescribed amount of weighted load.
Ingo Molnardd41f592007-07-09 18:51:59 +02002552 */
Peter Williamse1d14842007-10-24 18:23:51 +02002553 if (rem_load_move > 0) {
Peter Williamsa4ac01c2007-08-09 11:16:46 +02002554 if (p->prio < *this_best_prio)
2555 *this_best_prio = p->prio;
Ingo Molnardd41f592007-07-09 18:51:59 +02002556 p = iterator->next(iterator->arg);
2557 goto next;
2558 }
2559out:
2560 /*
Peter Williamse1d14842007-10-24 18:23:51 +02002561 * Right now, this is one of only two places pull_task() is called,
Ingo Molnardd41f592007-07-09 18:51:59 +02002562 * so we can safely collect pull_task() stats here rather than
2563 * inside pull_task().
2564 */
2565 schedstat_add(sd, lb_gained[idle], pulled);
2566
2567 if (all_pinned)
2568 *all_pinned = pinned;
Peter Williamse1d14842007-10-24 18:23:51 +02002569
2570 return max_load_move - rem_load_move;
Ingo Molnardd41f592007-07-09 18:51:59 +02002571}
Ingo Molnar48f24c42006-07-03 00:25:40 -07002572
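/*
 * Illustrative example, not part of the original source, of the skip_for_load
 * test in balance_tasks() above.  With rem_load_move == 1024, a task of weight
 * 3072 has 3072 >> 1 == 1536, which exceeds 1024 plus the fuzz margin, so it
 * is normally skipped: moving it would badly overshoot the requested amount.
 * It is still pulled if its prio beats *this_best_prio, so that high-priority
 * tasks keep getting spread across cpus regardless of their weight.
 */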
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573/*
Peter Williams43010652007-08-09 11:16:46 +02002574 * move_tasks tries to move up to max_load_move weighted load from busiest to
2575 * this_rq, as part of a balancing operation within domain "sd".
2576 * Returns 1 if successful and 0 otherwise.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 *
2578 * Called with both runqueues locked.
2579 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07002580static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
Peter Williams43010652007-08-09 11:16:46 +02002581 unsigned long max_load_move,
Ingo Molnard15bcfd2007-07-09 18:51:57 +02002582 struct sched_domain *sd, enum cpu_idle_type idle,
Peter Williams2dd73a42006-06-27 02:54:34 -07002583 int *all_pinned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02002585 const struct sched_class *class = sched_class_highest;
Peter Williams43010652007-08-09 11:16:46 +02002586 unsigned long total_load_moved = 0;
Peter Williamsa4ac01c2007-08-09 11:16:46 +02002587 int this_best_prio = this_rq->curr->prio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588
Ingo Molnardd41f592007-07-09 18:51:59 +02002589 do {
Peter Williams43010652007-08-09 11:16:46 +02002590 total_load_moved +=
2591 class->load_balance(this_rq, this_cpu, busiest,
Peter Williamse1d14842007-10-24 18:23:51 +02002592 max_load_move - total_load_moved,
Peter Williamsa4ac01c2007-08-09 11:16:46 +02002593 sd, idle, all_pinned, &this_best_prio);
Ingo Molnardd41f592007-07-09 18:51:59 +02002594 class = class->next;
Peter Williams43010652007-08-09 11:16:46 +02002595 } while (class && max_load_move > total_load_moved);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596
Peter Williams43010652007-08-09 11:16:46 +02002597 return total_load_moved > 0;
2598}
2599
Peter Williamse1d14842007-10-24 18:23:51 +02002600static int
2601iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
2602 struct sched_domain *sd, enum cpu_idle_type idle,
2603 struct rq_iterator *iterator)
2604{
2605 struct task_struct *p = iterator->start(iterator->arg);
2606 int pinned = 0;
2607
2608 while (p) {
2609 if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
2610 pull_task(busiest, p, this_rq, this_cpu);
2611 /*
2612 * Right now, this is only the second place pull_task()
2613 * is called, so we can safely collect pull_task()
2614 * stats here rather than inside pull_task().
2615 */
2616 schedstat_inc(sd, lb_gained[idle]);
2617
2618 return 1;
2619 }
2620 p = iterator->next(iterator->arg);
2621 }
2622
2623 return 0;
2624}
2625
Peter Williams43010652007-08-09 11:16:46 +02002626/*
2627 * move_one_task tries to move exactly one task from busiest to this_rq, as
2628 * part of active balancing operations within "domain".
2629 * Returns 1 if successful and 0 otherwise.
2630 *
2631 * Called with both runqueues locked.
2632 */
2633static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
2634 struct sched_domain *sd, enum cpu_idle_type idle)
2635{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02002636 const struct sched_class *class;
Peter Williams43010652007-08-09 11:16:46 +02002637
2638 for (class = sched_class_highest; class; class = class->next)
Peter Williamse1d14842007-10-24 18:23:51 +02002639 if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
Peter Williams43010652007-08-09 11:16:46 +02002640 return 1;
2641
2642 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643}
2644
2645/*
2646 * find_busiest_group finds and returns the busiest CPU group within the
Ingo Molnar48f24c42006-07-03 00:25:40 -07002647 * domain. It calculates and returns the amount of weighted load which
2648 * should be moved to restore balance via the imbalance parameter.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649 */
2650static struct sched_group *
2651find_busiest_group(struct sched_domain *sd, int this_cpu,
Ingo Molnardd41f592007-07-09 18:51:59 +02002652 unsigned long *imbalance, enum cpu_idle_type idle,
2653 int *sd_idle, cpumask_t *cpus, int *balance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654{
2655 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
2656 unsigned long max_load, avg_load, total_load, this_load, total_pwr;
Siddha, Suresh B0c117f12005-09-10 00:26:21 -07002657 unsigned long max_pull;
Peter Williams2dd73a42006-06-27 02:54:34 -07002658 unsigned long busiest_load_per_task, busiest_nr_running;
2659 unsigned long this_load_per_task, this_nr_running;
Ken Chen908a7c12007-10-17 16:55:11 +02002660 int load_idx, group_imb = 0;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002661#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2662 int power_savings_balance = 1;
2663 unsigned long leader_nr_running = 0, min_load_per_task = 0;
2664 unsigned long min_nr_running = ULONG_MAX;
2665 struct sched_group *group_min = NULL, *group_leader = NULL;
2666#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667
2668 max_load = this_load = total_load = total_pwr = 0;
Peter Williams2dd73a42006-06-27 02:54:34 -07002669 busiest_load_per_task = busiest_nr_running = 0;
2670 this_load_per_task = this_nr_running = 0;
Ingo Molnard15bcfd2007-07-09 18:51:57 +02002671 if (idle == CPU_NOT_IDLE)
Nick Piggin78979862005-06-25 14:57:13 -07002672 load_idx = sd->busy_idx;
Ingo Molnard15bcfd2007-07-09 18:51:57 +02002673 else if (idle == CPU_NEWLY_IDLE)
Nick Piggin78979862005-06-25 14:57:13 -07002674 load_idx = sd->newidle_idx;
2675 else
2676 load_idx = sd->idle_idx;
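	/*
	 * load_idx selects which rq->cpu_load[] average source_load()/
	 * target_load() will consult below; the per-domain busy/newidle/
	 * idle indices let each balancing context use a differently
	 * damped view of the load.
	 */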
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677
2678 do {
Ken Chen908a7c12007-10-17 16:55:11 +02002679 unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680 int local_group;
2681 int i;
Ken Chen908a7c12007-10-17 16:55:11 +02002682 int __group_imb = 0;
Siddha, Suresh B783609c2006-12-10 02:20:33 -08002683 unsigned int balance_cpu = -1, first_idle_cpu = 0;
Peter Williams2dd73a42006-06-27 02:54:34 -07002684 unsigned long sum_nr_running, sum_weighted_load;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685
2686 local_group = cpu_isset(this_cpu, group->cpumask);
2687
Siddha, Suresh B783609c2006-12-10 02:20:33 -08002688 if (local_group)
2689 balance_cpu = first_cpu(group->cpumask);
2690
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 /* Tally up the load of all CPUs in the group */
Peter Williams2dd73a42006-06-27 02:54:34 -07002692 sum_weighted_load = sum_nr_running = avg_load = 0;
Ken Chen908a7c12007-10-17 16:55:11 +02002693 max_cpu_load = 0;
2694 min_cpu_load = ~0UL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695
2696 for_each_cpu_mask(i, group->cpumask) {
Christoph Lameter0a2966b2006-09-25 23:30:51 -07002697 struct rq *rq;
2698
2699 if (!cpu_isset(i, *cpus))
2700 continue;
2701
2702 rq = cpu_rq(i);
Peter Williams2dd73a42006-06-27 02:54:34 -07002703
Suresh Siddha9439aab2007-07-19 21:28:35 +02002704 if (*sd_idle && rq->nr_running)
Nick Piggin5969fe02005-09-10 00:26:19 -07002705 *sd_idle = 0;
2706
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 /* Bias balancing toward cpus of our domain */
Siddha, Suresh B783609c2006-12-10 02:20:33 -08002708 if (local_group) {
2709 if (idle_cpu(i) && !first_idle_cpu) {
2710 first_idle_cpu = 1;
2711 balance_cpu = i;
2712 }
2713
Nick Piggina2000572006-02-10 01:51:02 -08002714 load = target_load(i, load_idx);
Ken Chen908a7c12007-10-17 16:55:11 +02002715 } else {
Nick Piggina2000572006-02-10 01:51:02 -08002716 load = source_load(i, load_idx);
Ken Chen908a7c12007-10-17 16:55:11 +02002717 if (load > max_cpu_load)
2718 max_cpu_load = load;
2719 if (min_cpu_load > load)
2720 min_cpu_load = load;
2721 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722
2723 avg_load += load;
Peter Williams2dd73a42006-06-27 02:54:34 -07002724 sum_nr_running += rq->nr_running;
Ingo Molnardd41f592007-07-09 18:51:59 +02002725 sum_weighted_load += weighted_cpuload(i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 }
2727
Siddha, Suresh B783609c2006-12-10 02:20:33 -08002728 /*
2729 * First idle cpu or the first cpu (busiest) in this sched group
2730 * is eligible for doing load balancing at this and higher
Suresh Siddha9439aab2007-07-19 21:28:35 +02002731 * domains. In the newly idle case, we will allow all the cpus
2732 * to do the newly idle load balance.
Siddha, Suresh B783609c2006-12-10 02:20:33 -08002733 */
Suresh Siddha9439aab2007-07-19 21:28:35 +02002734 if (idle != CPU_NEWLY_IDLE && local_group &&
2735 balance_cpu != this_cpu && balance) {
Siddha, Suresh B783609c2006-12-10 02:20:33 -08002736 *balance = 0;
2737 goto ret;
2738 }
2739
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 total_load += avg_load;
Eric Dumazet5517d862007-05-08 00:32:57 -07002741 total_pwr += group->__cpu_power;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742
2743 /* Adjust by relative CPU power of the group */
Eric Dumazet5517d862007-05-08 00:32:57 -07002744 avg_load = sg_div_cpu_power(group,
2745 avg_load * SCHED_LOAD_SCALE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746
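		/*
		 * A group is flagged as internally imbalanced when the
		 * spread between its most and least loaded cpu exceeds
		 * one nice-0 task's worth of load (SCHED_LOAD_SCALE).
		 */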
Ken Chen908a7c12007-10-17 16:55:11 +02002747 if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE)
2748 __group_imb = 1;
2749
Eric Dumazet5517d862007-05-08 00:32:57 -07002750 group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002751
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 if (local_group) {
2753 this_load = avg_load;
2754 this = group;
Peter Williams2dd73a42006-06-27 02:54:34 -07002755 this_nr_running = sum_nr_running;
2756 this_load_per_task = sum_weighted_load;
2757 } else if (avg_load > max_load &&
Ken Chen908a7c12007-10-17 16:55:11 +02002758 (sum_nr_running > group_capacity || __group_imb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759 max_load = avg_load;
2760 busiest = group;
Peter Williams2dd73a42006-06-27 02:54:34 -07002761 busiest_nr_running = sum_nr_running;
2762 busiest_load_per_task = sum_weighted_load;
Ken Chen908a7c12007-10-17 16:55:11 +02002763 group_imb = __group_imb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 }
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002765
2766#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2767 /*
2768 * Busy processors will not participate in power savings
2769 * balance.
2770 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002771 if (idle == CPU_NOT_IDLE ||
2772 !(sd->flags & SD_POWERSAVINGS_BALANCE))
2773 goto group_next;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002774
2775 /*
2776 * If the local group is idle or completely loaded,
2777 * there is no need to do power savings balance at this domain.
2778 */
2779 if (local_group && (this_nr_running >= group_capacity ||
2780 !this_nr_running))
2781 power_savings_balance = 0;
2782
Ingo Molnardd41f592007-07-09 18:51:59 +02002783 /*
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002784 * If a group is already running at full capacity or idle,
2785 * don't include that group in power savings calculations
Ingo Molnardd41f592007-07-09 18:51:59 +02002786 */
2787 if (!power_savings_balance || sum_nr_running >= group_capacity
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002788 || !sum_nr_running)
Ingo Molnardd41f592007-07-09 18:51:59 +02002789 goto group_next;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002790
Ingo Molnardd41f592007-07-09 18:51:59 +02002791 /*
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002792 * Calculate the group which has the least non-idle load.
Ingo Molnardd41f592007-07-09 18:51:59 +02002793 * This is the group from which we need to pick up load
2794 * to save power.
2795 */
2796 if ((sum_nr_running < min_nr_running) ||
2797 (sum_nr_running == min_nr_running &&
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002798 first_cpu(group->cpumask) <
2799 first_cpu(group_min->cpumask))) {
Ingo Molnardd41f592007-07-09 18:51:59 +02002800 group_min = group;
2801 min_nr_running = sum_nr_running;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002802 min_load_per_task = sum_weighted_load /
2803 sum_nr_running;
Ingo Molnardd41f592007-07-09 18:51:59 +02002804 }
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002805
Ingo Molnardd41f592007-07-09 18:51:59 +02002806 /*
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002807 * Calculate the group which is near its
Ingo Molnardd41f592007-07-09 18:51:59 +02002808 * capacity but still has some room to pick up load
2809 * from another group and save more power.
2810 */
2811 if (sum_nr_running <= group_capacity - 1) {
2812 if (sum_nr_running > leader_nr_running ||
2813 (sum_nr_running == leader_nr_running &&
2814 first_cpu(group->cpumask) >
2815 first_cpu(group_leader->cpumask))) {
2816 group_leader = group;
2817 leader_nr_running = sum_nr_running;
2818 }
Ingo Molnar48f24c42006-07-03 00:25:40 -07002819 }
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002820group_next:
2821#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822 group = group->next;
2823 } while (group != sd->groups);
2824
Peter Williams2dd73a42006-06-27 02:54:34 -07002825 if (!busiest || this_load >= max_load || busiest_nr_running == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 goto out_balanced;
2827
2828 avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
2829
2830 if (this_load >= avg_load ||
2831 100*max_load <= sd->imbalance_pct*this_load)
2832 goto out_balanced;
2833
Peter Williams2dd73a42006-06-27 02:54:34 -07002834 busiest_load_per_task /= busiest_nr_running;
Ken Chen908a7c12007-10-17 16:55:11 +02002835 if (group_imb)
2836 busiest_load_per_task = min(busiest_load_per_task, avg_load);
2837
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 /*
2839 * We're trying to get all the cpus to the average load, so we don't
2840 * want to push ourselves above the average load, nor do we wish to
2841 * reduce the max loaded cpu below the average load, as either of these
2842 * actions would just result in more rebalancing later, and ping-pong
2843 * tasks around. Thus we look for the minimum possible imbalance.
2844 * Negative imbalances (*we* are more loaded than anyone else) will
2845 * be counted as no imbalance for these purposes -- we can't fix that
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01002846 * by pulling tasks to us. Be careful of negative numbers as they'll
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 * appear as very large values with unsigned longs.
2848 */
Peter Williams2dd73a42006-06-27 02:54:34 -07002849 if (max_load <= busiest_load_per_task)
2850 goto out_balanced;
2851
2852 /*
2853 * In the presence of smp nice balancing, certain scenarios can have
2854 * max load less than avg load (as we skip the groups at or below
2855 * their cpu_power while calculating max_load).
2856 */
2857 if (max_load < avg_load) {
2858 *imbalance = 0;
2859 goto small_imbalance;
2860 }
Siddha, Suresh B0c117f12005-09-10 00:26:21 -07002861
2862 /* Don't want to pull so many tasks that a group would go idle */
Peter Williams2dd73a42006-06-27 02:54:34 -07002863 max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
Siddha, Suresh B0c117f12005-09-10 00:26:21 -07002864
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865 /* How much load to actually move to equalise the imbalance */
Eric Dumazet5517d862007-05-08 00:32:57 -07002866 *imbalance = min(max_pull * busiest->__cpu_power,
2867 (avg_load - this_load) * this->__cpu_power)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868 / SCHED_LOAD_SCALE;
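	/*
	 * Illustrative example (assuming SCHED_LOAD_SCALE == 1024 and both
	 * groups at __cpu_power == 1024): with max_pull == 512 and
	 * (avg_load - this_load) == 512, *imbalance works out to
	 * min(512*1024, 512*1024) / 1024 == 512, i.e. roughly half a
	 * nice-0 task's worth of weighted load to move.
	 */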
2869
Peter Williams2dd73a42006-06-27 02:54:34 -07002870 /*
2871 * If *imbalance is less than the average load per runnable task,
2872 * there is no guarantee that any tasks will be moved, so we consider
2873 * bumping its value to force at least one task to be
2874 * moved.
2875 */
Suresh Siddha7fd0d2d2007-09-05 14:32:48 +02002876 if (*imbalance < busiest_load_per_task) {
Ingo Molnar48f24c42006-07-03 00:25:40 -07002877 unsigned long tmp, pwr_now, pwr_move;
Peter Williams2dd73a42006-06-27 02:54:34 -07002878 unsigned int imbn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879
Peter Williams2dd73a42006-06-27 02:54:34 -07002880small_imbalance:
2881 pwr_move = pwr_now = 0;
2882 imbn = 2;
2883 if (this_nr_running) {
2884 this_load_per_task /= this_nr_running;
2885 if (busiest_load_per_task > this_load_per_task)
2886 imbn = 1;
2887 } else
2888 this_load_per_task = SCHED_LOAD_SCALE;
2889
Ingo Molnardd41f592007-07-09 18:51:59 +02002890 if (max_load - this_load + SCHED_LOAD_SCALE_FUZZ >=
2891 busiest_load_per_task * imbn) {
Peter Williams2dd73a42006-06-27 02:54:34 -07002892 *imbalance = busiest_load_per_task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 return busiest;
2894 }
2895
2896 /*
2897 * OK, we don't have enough imbalance to justify moving tasks,
2898 * however we may be able to increase total CPU power used by
2899 * moving them.
2900 */
2901
Eric Dumazet5517d862007-05-08 00:32:57 -07002902 pwr_now += busiest->__cpu_power *
2903 min(busiest_load_per_task, max_load);
2904 pwr_now += this->__cpu_power *
2905 min(this_load_per_task, this_load);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 pwr_now /= SCHED_LOAD_SCALE;
2907
2908 /* Amount of load we'd subtract */
Eric Dumazet5517d862007-05-08 00:32:57 -07002909 tmp = sg_div_cpu_power(busiest,
2910 busiest_load_per_task * SCHED_LOAD_SCALE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 if (max_load > tmp)
Eric Dumazet5517d862007-05-08 00:32:57 -07002912 pwr_move += busiest->__cpu_power *
Peter Williams2dd73a42006-06-27 02:54:34 -07002913 min(busiest_load_per_task, max_load - tmp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914
2915 /* Amount of load we'd add */
Eric Dumazet5517d862007-05-08 00:32:57 -07002916 if (max_load * busiest->__cpu_power <
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08002917 busiest_load_per_task * SCHED_LOAD_SCALE)
Eric Dumazet5517d862007-05-08 00:32:57 -07002918 tmp = sg_div_cpu_power(this,
2919 max_load * busiest->__cpu_power);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920 else
Eric Dumazet5517d862007-05-08 00:32:57 -07002921 tmp = sg_div_cpu_power(this,
2922 busiest_load_per_task * SCHED_LOAD_SCALE);
2923 pwr_move += this->__cpu_power *
2924 min(this_load_per_task, this_load + tmp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 pwr_move /= SCHED_LOAD_SCALE;
2926
2927 /* Move if we gain throughput */
Suresh Siddha7fd0d2d2007-09-05 14:32:48 +02002928 if (pwr_move > pwr_now)
2929 *imbalance = busiest_load_per_task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 }
2931
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 return busiest;
2933
2934out_balanced:
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002935#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
Ingo Molnard15bcfd2007-07-09 18:51:57 +02002936 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002937 goto ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938
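	/*
	 * Power-savings balance: if this cpu's group is the designated
	 * leader (almost full, but with spare capacity), pull the load of
	 * the least loaded non-idle group (group_min) onto it, so that
	 * group can go completely idle.
	 */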
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002939 if (this == group_leader && group_leader != group_min) {
2940 *imbalance = min_load_per_task;
2941 return group_min;
2942 }
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07002943#endif
Siddha, Suresh B783609c2006-12-10 02:20:33 -08002944ret:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 *imbalance = 0;
2946 return NULL;
2947}
2948
2949/*
2950 * find_busiest_queue - find the busiest runqueue among the cpus in group.
2951 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07002952static struct rq *
Ingo Molnard15bcfd2007-07-09 18:51:57 +02002953find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
Christoph Lameter0a2966b2006-09-25 23:30:51 -07002954 unsigned long imbalance, cpumask_t *cpus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002956 struct rq *busiest = NULL, *rq;
Peter Williams2dd73a42006-06-27 02:54:34 -07002957 unsigned long max_load = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958 int i;
2959
2960 for_each_cpu_mask(i, group->cpumask) {
Ingo Molnardd41f592007-07-09 18:51:59 +02002961 unsigned long wl;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07002962
2963 if (!cpu_isset(i, *cpus))
2964 continue;
2965
Ingo Molnar48f24c42006-07-03 00:25:40 -07002966 rq = cpu_rq(i);
Ingo Molnardd41f592007-07-09 18:51:59 +02002967 wl = weighted_cpuload(i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968
Ingo Molnardd41f592007-07-09 18:51:59 +02002969 if (rq->nr_running == 1 && wl > imbalance)
Peter Williams2dd73a42006-06-27 02:54:34 -07002970 continue;
2971
Ingo Molnardd41f592007-07-09 18:51:59 +02002972 if (wl > max_load) {
2973 max_load = wl;
Ingo Molnar48f24c42006-07-03 00:25:40 -07002974 busiest = rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 }
2976 }
2977
2978 return busiest;
2979}
2980
2981/*
Nick Piggin77391d72005-06-25 14:57:30 -07002982 * Max backoff if we encounter pinned tasks. Pretty arbitrary value; it
2983 * just needs to be large enough.
2984 */
2985#define MAX_PINNED_INTERVAL 512
2986
2987/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2989 * tasks if there is an imbalance.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07002991static int load_balance(int this_cpu, struct rq *this_rq,
Ingo Molnard15bcfd2007-07-09 18:51:57 +02002992 struct sched_domain *sd, enum cpu_idle_type idle,
Siddha, Suresh B783609c2006-12-10 02:20:33 -08002993 int *balance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994{
Peter Williams43010652007-08-09 11:16:46 +02002995 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 struct sched_group *group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997 unsigned long imbalance;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002998 struct rq *busiest;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07002999 cpumask_t cpus = CPU_MASK_ALL;
Christoph Lameterfe2eea32006-12-10 02:20:21 -08003000 unsigned long flags;
Nick Piggin5969fe02005-09-10 00:26:19 -07003001
Siddha, Suresh B89c47102006-10-03 01:14:09 -07003002 /*
3003 * When the power savings policy is enabled for the parent domain, an idle
3004 * sibling can pick up load irrespective of busy siblings. In this case,
Ingo Molnardd41f592007-07-09 18:51:59 +02003005 * let the state of the idle sibling percolate up as CPU_IDLE, instead of
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003006 * portraying it as CPU_NOT_IDLE.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07003007 */
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003008 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
Siddha, Suresh B89c47102006-10-03 01:14:09 -07003009 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Nick Piggin5969fe02005-09-10 00:26:19 -07003010 sd_idle = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
Ingo Molnar2d723762007-10-15 17:00:12 +02003012 schedstat_inc(sd, lb_count[idle]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013
Christoph Lameter0a2966b2006-09-25 23:30:51 -07003014redo:
3015 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
Siddha, Suresh B783609c2006-12-10 02:20:33 -08003016 &cpus, balance);
3017
Chen, Kenneth W06066712006-12-10 02:20:35 -08003018 if (*balance == 0)
Siddha, Suresh B783609c2006-12-10 02:20:33 -08003019 goto out_balanced;
Siddha, Suresh B783609c2006-12-10 02:20:33 -08003020
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021 if (!group) {
3022 schedstat_inc(sd, lb_nobusyg[idle]);
3023 goto out_balanced;
3024 }
3025
Christoph Lameter0a2966b2006-09-25 23:30:51 -07003026 busiest = find_busiest_queue(group, idle, imbalance, &cpus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027 if (!busiest) {
3028 schedstat_inc(sd, lb_nobusyq[idle]);
3029 goto out_balanced;
3030 }
3031
Nick Piggindb935db2005-06-25 14:57:11 -07003032 BUG_ON(busiest == this_rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033
3034 schedstat_add(sd, lb_imbalance[idle], imbalance);
3035
Peter Williams43010652007-08-09 11:16:46 +02003036 ld_moved = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 if (busiest->nr_running > 1) {
3038 /*
3039 * Attempt to move tasks. If find_busiest_group has found
3040 * an imbalance but busiest->nr_running <= 1, the group is
Peter Williams43010652007-08-09 11:16:46 +02003041 * still unbalanced. ld_moved simply stays zero, so it is
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 * correctly treated as an imbalance.
3043 */
Christoph Lameterfe2eea32006-12-10 02:20:21 -08003044 local_irq_save(flags);
Nick Piggine17224b2005-09-10 00:26:18 -07003045 double_rq_lock(this_rq, busiest);
Peter Williams43010652007-08-09 11:16:46 +02003046 ld_moved = move_tasks(this_rq, this_cpu, busiest,
Ingo Molnar48f24c42006-07-03 00:25:40 -07003047 imbalance, sd, idle, &all_pinned);
Nick Piggine17224b2005-09-10 00:26:18 -07003048 double_rq_unlock(this_rq, busiest);
Christoph Lameterfe2eea32006-12-10 02:20:21 -08003049 local_irq_restore(flags);
Nick Piggin81026792005-06-25 14:57:07 -07003050
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003051 /*
3052 * some other cpu did the load balance for us.
3053 */
Peter Williams43010652007-08-09 11:16:46 +02003054 if (ld_moved && this_cpu != smp_processor_id())
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003055 resched_cpu(this_cpu);
3056
Nick Piggin81026792005-06-25 14:57:07 -07003057 /* All tasks on this runqueue were pinned by CPU affinity */
Christoph Lameter0a2966b2006-09-25 23:30:51 -07003058 if (unlikely(all_pinned)) {
3059 cpu_clear(cpu_of(busiest), cpus);
3060 if (!cpus_empty(cpus))
3061 goto redo;
Nick Piggin81026792005-06-25 14:57:07 -07003062 goto out_balanced;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07003063 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064 }
Nick Piggin81026792005-06-25 14:57:07 -07003065
Peter Williams43010652007-08-09 11:16:46 +02003066 if (!ld_moved) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067 schedstat_inc(sd, lb_failed[idle]);
3068 sd->nr_balance_failed++;
3069
3070 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071
Christoph Lameterfe2eea32006-12-10 02:20:21 -08003072 spin_lock_irqsave(&busiest->lock, flags);
Siddha, Suresh Bfa3b6dd2005-09-10 00:26:21 -07003073
3074 /* don't kick the migration_thread if the curr
3075 * task on the busiest cpu can't be moved to this_cpu
3076 */
3077 if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
Christoph Lameterfe2eea32006-12-10 02:20:21 -08003078 spin_unlock_irqrestore(&busiest->lock, flags);
Siddha, Suresh Bfa3b6dd2005-09-10 00:26:21 -07003079 all_pinned = 1;
3080 goto out_one_pinned;
3081 }
3082
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 if (!busiest->active_balance) {
3084 busiest->active_balance = 1;
3085 busiest->push_cpu = this_cpu;
Nick Piggin81026792005-06-25 14:57:07 -07003086 active_balance = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003087 }
Christoph Lameterfe2eea32006-12-10 02:20:21 -08003088 spin_unlock_irqrestore(&busiest->lock, flags);
Nick Piggin81026792005-06-25 14:57:07 -07003089 if (active_balance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090 wake_up_process(busiest->migration_thread);
3091
3092 /*
3093 * We've kicked active balancing, reset the failure
3094 * counter.
3095 */
Nick Piggin39507452005-06-25 14:57:09 -07003096 sd->nr_balance_failed = sd->cache_nice_tries+1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003097 }
Nick Piggin81026792005-06-25 14:57:07 -07003098 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099 sd->nr_balance_failed = 0;
3100
Nick Piggin81026792005-06-25 14:57:07 -07003101 if (likely(!active_balance)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102 /* We were unbalanced, so reset the balancing interval */
3103 sd->balance_interval = sd->min_interval;
Nick Piggin81026792005-06-25 14:57:07 -07003104 } else {
3105 /*
3106 * If we've begun active balancing, start to back off. This
3107 * case may not be covered by the all_pinned logic if there
3108 * is only 1 task on the busy runqueue (because we don't call
3109 * move_tasks).
3110 */
3111 if (sd->balance_interval < sd->max_interval)
3112 sd->balance_interval *= 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113 }
3114
Peter Williams43010652007-08-09 11:16:46 +02003115 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
Siddha, Suresh B89c47102006-10-03 01:14:09 -07003116 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Nick Piggin5969fe02005-09-10 00:26:19 -07003117 return -1;
Peter Williams43010652007-08-09 11:16:46 +02003118 return ld_moved;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119
3120out_balanced:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121 schedstat_inc(sd, lb_balanced[idle]);
3122
Nick Piggin16cfb1c2005-06-25 14:57:08 -07003123 sd->nr_balance_failed = 0;
Siddha, Suresh Bfa3b6dd2005-09-10 00:26:21 -07003124
3125out_one_pinned:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126 /* tune up the balancing interval */
Nick Piggin77391d72005-06-25 14:57:30 -07003127 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
3128 (sd->balance_interval < sd->max_interval))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129 sd->balance_interval *= 2;
3130
Ingo Molnar48f24c42006-07-03 00:25:40 -07003131 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
Siddha, Suresh B89c47102006-10-03 01:14:09 -07003132 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Nick Piggin5969fe02005-09-10 00:26:19 -07003133 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134 return 0;
3135}
3136
3137/*
3138 * Check this_cpu to ensure it is balanced within domain. Attempt to move
3139 * tasks if there is an imbalance.
3140 *
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003141 * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 * this_rq is locked.
3143 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07003144static int
Ingo Molnar70b97a72006-07-03 00:25:42 -07003145load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003146{
3147 struct sched_group *group;
Ingo Molnar70b97a72006-07-03 00:25:42 -07003148 struct rq *busiest = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149 unsigned long imbalance;
Peter Williams43010652007-08-09 11:16:46 +02003150 int ld_moved = 0;
Nick Piggin5969fe02005-09-10 00:26:19 -07003151 int sd_idle = 0;
Suresh Siddha969bb4e2007-07-19 21:28:35 +02003152 int all_pinned = 0;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07003153 cpumask_t cpus = CPU_MASK_ALL;
Nick Piggin5969fe02005-09-10 00:26:19 -07003154
Siddha, Suresh B89c47102006-10-03 01:14:09 -07003155 /*
3156 * When the power savings policy is enabled for the parent domain, an idle
3157 * sibling can pick up load irrespective of busy siblings. In this case,
3158 * let the state of the idle sibling percolate up as CPU_IDLE, instead of
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003159 * portraying it as CPU_NOT_IDLE.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07003160 */
3161 if (sd->flags & SD_SHARE_CPUPOWER &&
3162 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Nick Piggin5969fe02005-09-10 00:26:19 -07003163 sd_idle = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164
Ingo Molnar2d723762007-10-15 17:00:12 +02003165 schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
Christoph Lameter0a2966b2006-09-25 23:30:51 -07003166redo:
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003167 group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
Siddha, Suresh B783609c2006-12-10 02:20:33 -08003168 &sd_idle, &cpus, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169 if (!group) {
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003170 schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
Nick Piggin16cfb1c2005-06-25 14:57:08 -07003171 goto out_balanced;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172 }
3173
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003174 busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance,
Christoph Lameter0a2966b2006-09-25 23:30:51 -07003175 &cpus);
Nick Piggindb935db2005-06-25 14:57:11 -07003176 if (!busiest) {
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003177 schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
Nick Piggin16cfb1c2005-06-25 14:57:08 -07003178 goto out_balanced;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 }
3180
Nick Piggindb935db2005-06-25 14:57:11 -07003181 BUG_ON(busiest == this_rq);
3182
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003183 schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
Nick Piggind6d5cfa2005-09-10 00:26:16 -07003184
Peter Williams43010652007-08-09 11:16:46 +02003185 ld_moved = 0;
Nick Piggind6d5cfa2005-09-10 00:26:16 -07003186 if (busiest->nr_running > 1) {
3187 /* Attempt to move tasks */
3188 double_lock_balance(this_rq, busiest);
Ingo Molnar6e82a3b2007-08-09 11:16:51 +02003189 /* this_rq->clock is already updated */
3190 update_rq_clock(busiest);
Peter Williams43010652007-08-09 11:16:46 +02003191 ld_moved = move_tasks(this_rq, this_cpu, busiest,
Suresh Siddha969bb4e2007-07-19 21:28:35 +02003192 imbalance, sd, CPU_NEWLY_IDLE,
3193 &all_pinned);
Nick Piggind6d5cfa2005-09-10 00:26:16 -07003194 spin_unlock(&busiest->lock);
Christoph Lameter0a2966b2006-09-25 23:30:51 -07003195
Suresh Siddha969bb4e2007-07-19 21:28:35 +02003196 if (unlikely(all_pinned)) {
Christoph Lameter0a2966b2006-09-25 23:30:51 -07003197 cpu_clear(cpu_of(busiest), cpus);
3198 if (!cpus_empty(cpus))
3199 goto redo;
3200 }
Nick Piggind6d5cfa2005-09-10 00:26:16 -07003201 }
3202
Peter Williams43010652007-08-09 11:16:46 +02003203 if (!ld_moved) {
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003204 schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
Siddha, Suresh B89c47102006-10-03 01:14:09 -07003205 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3206 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Nick Piggin5969fe02005-09-10 00:26:19 -07003207 return -1;
3208 } else
Nick Piggin16cfb1c2005-06-25 14:57:08 -07003209 sd->nr_balance_failed = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210
Peter Williams43010652007-08-09 11:16:46 +02003211 return ld_moved;
Nick Piggin16cfb1c2005-06-25 14:57:08 -07003212
3213out_balanced:
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003214 schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
Ingo Molnar48f24c42006-07-03 00:25:40 -07003215 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
Siddha, Suresh B89c47102006-10-03 01:14:09 -07003216 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
Nick Piggin5969fe02005-09-10 00:26:19 -07003217 return -1;
Nick Piggin16cfb1c2005-06-25 14:57:08 -07003218 sd->nr_balance_failed = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003219
Nick Piggin16cfb1c2005-06-25 14:57:08 -07003220 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221}
3222
3223/*
3224 * idle_balance is called by schedule() if this_cpu is about to become
3225 * idle. Attempts to pull tasks from other CPUs.
3226 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07003227static void idle_balance(int this_cpu, struct rq *this_rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228{
3229 struct sched_domain *sd;
Ingo Molnardd41f592007-07-09 18:51:59 +02003230 int pulled_task = -1;
3231 unsigned long next_balance = jiffies + HZ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232
3233 for_each_domain(this_cpu, sd) {
Christoph Lameter92c4ca52007-06-23 17:16:33 -07003234 unsigned long interval;
3235
3236 if (!(sd->flags & SD_LOAD_BALANCE))
3237 continue;
3238
3239 if (sd->flags & SD_BALANCE_NEWIDLE)
Ingo Molnar48f24c42006-07-03 00:25:40 -07003240 /* If we've pulled tasks over stop searching: */
Christoph Lameter1bd77f22006-12-10 02:20:27 -08003241 pulled_task = load_balance_newidle(this_cpu,
Christoph Lameter92c4ca52007-06-23 17:16:33 -07003242 this_rq, sd);
3243
3244 interval = msecs_to_jiffies(sd->balance_interval);
3245 if (time_after(next_balance, sd->last_balance + interval))
3246 next_balance = sd->last_balance + interval;
3247 if (pulled_task)
3248 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249 }
Ingo Molnardd41f592007-07-09 18:51:59 +02003250 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
Christoph Lameter1bd77f22006-12-10 02:20:27 -08003251 /*
3252 * We are going idle. next_balance may have been set based on
3253 * a busy processor, so reset it.
3254 */
3255 this_rq->next_balance = next_balance;
Ingo Molnardd41f592007-07-09 18:51:59 +02003256 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257}
3258
3259/*
3260 * active_load_balance is run by migration threads. It pushes running tasks
3261 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
3262 * running on each physical CPU where possible, and avoids physical /
3263 * logical imbalances.
3264 *
3265 * Called with busiest_rq locked.
3266 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07003267static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003268{
Nick Piggin39507452005-06-25 14:57:09 -07003269 int target_cpu = busiest_rq->push_cpu;
Ingo Molnar70b97a72006-07-03 00:25:42 -07003270 struct sched_domain *sd;
3271 struct rq *target_rq;
Nick Piggin39507452005-06-25 14:57:09 -07003272
Ingo Molnar48f24c42006-07-03 00:25:40 -07003273 /* Is there any task to move? */
Nick Piggin39507452005-06-25 14:57:09 -07003274 if (busiest_rq->nr_running <= 1)
Nick Piggin39507452005-06-25 14:57:09 -07003275 return;
3276
3277 target_rq = cpu_rq(target_cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278
3279 /*
Nick Piggin39507452005-06-25 14:57:09 -07003280 * This condition is "impossible"; if it occurs
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01003281 * we need to fix it. Originally reported by
Nick Piggin39507452005-06-25 14:57:09 -07003282 * Bjorn Helgaas on a 128-cpu setup.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283 */
Nick Piggin39507452005-06-25 14:57:09 -07003284 BUG_ON(busiest_rq == target_rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285
Nick Piggin39507452005-06-25 14:57:09 -07003286 /* move a task from busiest_rq to target_rq */
3287 double_lock_balance(busiest_rq, target_rq);
Ingo Molnar6e82a3b2007-08-09 11:16:51 +02003288 update_rq_clock(busiest_rq);
3289 update_rq_clock(target_rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290
Nick Piggin39507452005-06-25 14:57:09 -07003291 /* Search for an sd spanning us and the target CPU. */
Chen, Kenneth Wc96d1452006-06-27 02:54:28 -07003292 for_each_domain(target_cpu, sd) {
Nick Piggin39507452005-06-25 14:57:09 -07003293 if ((sd->flags & SD_LOAD_BALANCE) &&
Ingo Molnar48f24c42006-07-03 00:25:40 -07003294 cpu_isset(busiest_cpu, sd->span))
Nick Piggin39507452005-06-25 14:57:09 -07003295 break;
Chen, Kenneth Wc96d1452006-06-27 02:54:28 -07003296 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297
Ingo Molnar48f24c42006-07-03 00:25:40 -07003298 if (likely(sd)) {
Ingo Molnar2d723762007-10-15 17:00:12 +02003299 schedstat_inc(sd, alb_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300
Peter Williams43010652007-08-09 11:16:46 +02003301 if (move_one_task(target_rq, target_cpu, busiest_rq,
3302 sd, CPU_IDLE))
Ingo Molnar48f24c42006-07-03 00:25:40 -07003303 schedstat_inc(sd, alb_pushed);
3304 else
3305 schedstat_inc(sd, alb_failed);
3306 }
Nick Piggin39507452005-06-25 14:57:09 -07003307 spin_unlock(&target_rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308}
3309
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003310#ifdef CONFIG_NO_HZ
3311static struct {
3312 atomic_t load_balancer;
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01003313 cpumask_t cpu_mask;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003314} nohz ____cacheline_aligned = {
3315 .load_balancer = ATOMIC_INIT(-1),
3316 .cpu_mask = CPU_MASK_NONE,
3317};
3318
Christoph Lameter7835b982006-12-10 02:20:22 -08003319/*
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003320 * This routine will try to nominate the ilb (idle load balancing)
3321 * owner among the cpus whose ticks are stopped. The ilb owner will do the idle
3322 * load balancing on behalf of all those cpus. If all the cpus in the system
3323 * go into this tickless mode, then there will be no ilb owner (as there is
3324 * no need for one) and all the cpus will sleep till the next wakeup event
3325 * arrives.
Christoph Lameter7835b982006-12-10 02:20:22 -08003326 *
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003327 * For the ilb owner, the tick is not stopped, and this tick will be used
3328 * for idle load balancing. The ilb owner will still be part of
3329 * nohz.cpu_mask.
3330 *
3331 * While stopping the tick, this cpu will become the ilb owner if there
3332 * is no other owner, and it will remain the owner until it becomes busy
3333 * or until all cpus in the system stop their ticks, at which point
3334 * there is no need for an ilb owner.
3335 *
3336 * When the ilb owner becomes busy, it nominates another owner during the
3337 * next busy scheduler_tick().
3338 */
3339int select_nohz_load_balancer(int stop_tick)
3340{
3341 int cpu = smp_processor_id();
3342
3343 if (stop_tick) {
3344 cpu_set(cpu, nohz.cpu_mask);
3345 cpu_rq(cpu)->in_nohz_recently = 1;
3346
3347 /*
3348 * If we are going offline and still the leader, give up!
3349 */
3350 if (cpu_is_offline(cpu) &&
3351 atomic_read(&nohz.load_balancer) == cpu) {
3352 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3353 BUG();
3354 return 0;
3355 }
3356
3357 /* time for the ilb owner to sleep as well */
3358 if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
3359 if (atomic_read(&nohz.load_balancer) == cpu)
3360 atomic_set(&nohz.load_balancer, -1);
3361 return 0;
3362 }
3363
3364 if (atomic_read(&nohz.load_balancer) == -1) {
3365 /* make me the ilb owner */
3366 if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
3367 return 1;
3368 } else if (atomic_read(&nohz.load_balancer) == cpu)
3369 return 1;
3370 } else {
3371 if (!cpu_isset(cpu, nohz.cpu_mask))
3372 return 0;
3373
3374 cpu_clear(cpu, nohz.cpu_mask);
3375
3376 if (atomic_read(&nohz.load_balancer) == cpu)
3377 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3378 BUG();
3379 }
3380 return 0;
3381}
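
/*
 * Rough caller sketch (illustrative only, not the actual tick code): a cpu
 * about to stop its tick calls select_nohz_load_balancer(1); a non-zero
 * return means it is now the ilb owner and keeps its tick running to do
 * the idle balancing for the others. When its tick is restarted, it calls
 * select_nohz_load_balancer(0) to drop out of nohz.cpu_mask and give up
 * ownership.
 */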
3382#endif
3383
3384static DEFINE_SPINLOCK(balancing);
3385
3386/*
Christoph Lameter7835b982006-12-10 02:20:22 -08003387 * It checks each scheduling domain to see if it is due to be balanced,
3388 * and initiates a balancing operation if so.
3389 *
3390 * Balancing parameters are set up in arch_init_sched_domains.
3391 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02003392static void rebalance_domains(int cpu, enum cpu_idle_type idle)
Christoph Lameter7835b982006-12-10 02:20:22 -08003393{
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003394 int balance = 1;
3395 struct rq *rq = cpu_rq(cpu);
Christoph Lameter7835b982006-12-10 02:20:22 -08003396 unsigned long interval;
3397 struct sched_domain *sd;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003398 /* Earliest time when we have to do rebalance again */
Christoph Lameterc9819f42006-12-10 02:20:25 -08003399 unsigned long next_balance = jiffies + 60*HZ;
Suresh Siddhaf549da82007-08-23 15:18:02 +02003400 int update_next_balance = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003402 for_each_domain(cpu, sd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403 if (!(sd->flags & SD_LOAD_BALANCE))
3404 continue;
3405
3406 interval = sd->balance_interval;
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003407 if (idle != CPU_IDLE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408 interval *= sd->busy_factor;
3409
3410 /* scale ms to jiffies */
3411 interval = msecs_to_jiffies(interval);
3412 if (unlikely(!interval))
3413 interval = 1;
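		/*
		 * Cap the interval: HZ*NR_CPUS/10 jiffies corresponds to
		 * NR_CPUS/10 seconds, so even busy-scaled intervals stay
		 * bounded on large machines.
		 */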
Ingo Molnardd41f592007-07-09 18:51:59 +02003414 if (interval > HZ*NR_CPUS/10)
3415 interval = HZ*NR_CPUS/10;
3416
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417
Christoph Lameter08c183f2006-12-10 02:20:29 -08003418 if (sd->flags & SD_SERIALIZE) {
3419 if (!spin_trylock(&balancing))
3420 goto out;
3421 }
3422
Christoph Lameterc9819f42006-12-10 02:20:25 -08003423 if (time_after_eq(jiffies, sd->last_balance + interval)) {
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003424 if (load_balance(cpu, rq, sd, idle, &balance)) {
Siddha, Suresh Bfa3b6dd2005-09-10 00:26:21 -07003425 /*
3426 * We've pulled tasks over, so either we're no
Nick Piggin5969fe02005-09-10 00:26:19 -07003427 * longer idle or one of our SMT siblings is
3428 * not idle.
3429 */
Ingo Molnard15bcfd2007-07-09 18:51:57 +02003430 idle = CPU_NOT_IDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431 }
Christoph Lameter1bd77f22006-12-10 02:20:27 -08003432 sd->last_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433 }
Christoph Lameter08c183f2006-12-10 02:20:29 -08003434 if (sd->flags & SD_SERIALIZE)
3435 spin_unlock(&balancing);
3436out:
Suresh Siddhaf549da82007-08-23 15:18:02 +02003437 if (time_after(next_balance, sd->last_balance + interval)) {
Christoph Lameterc9819f42006-12-10 02:20:25 -08003438 next_balance = sd->last_balance + interval;
Suresh Siddhaf549da82007-08-23 15:18:02 +02003439 update_next_balance = 1;
3440 }
Siddha, Suresh B783609c2006-12-10 02:20:33 -08003441
3442 /*
3443 * Stop the load balance at this level. There is another
3444 * CPU in our sched group which is doing load balancing more
3445 * actively.
3446 */
3447 if (!balance)
3448 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449 }
Suresh Siddhaf549da82007-08-23 15:18:02 +02003450
3451 /*
3452 * next_balance will be updated only when there is a need.
3453 * When the cpu is attached to the null domain, for example, it will not be
3454 * updated.
3455 */
3456 if (likely(update_next_balance))
3457 rq->next_balance = next_balance;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003458}
3459
3460/*
3461 * run_rebalance_domains is triggered when needed from the scheduler tick.
3462 * In the CONFIG_NO_HZ case, the idle load balance owner will do the
3463 * rebalancing for all the cpus whose scheduler ticks are stopped.
3464 */
3465static void run_rebalance_domains(struct softirq_action *h)
3466{
Ingo Molnardd41f592007-07-09 18:51:59 +02003467 int this_cpu = smp_processor_id();
3468 struct rq *this_rq = cpu_rq(this_cpu);
3469 enum cpu_idle_type idle = this_rq->idle_at_tick ?
3470 CPU_IDLE : CPU_NOT_IDLE;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003471
Ingo Molnardd41f592007-07-09 18:51:59 +02003472 rebalance_domains(this_cpu, idle);
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003473
3474#ifdef CONFIG_NO_HZ
3475 /*
3476 * If this cpu is the owner for idle load balancing, then do the
3477 * balancing on behalf of the other idle cpus whose ticks are
3478 * stopped.
3479 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003480 if (this_rq->idle_at_tick &&
3481 atomic_read(&nohz.load_balancer) == this_cpu) {
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003482 cpumask_t cpus = nohz.cpu_mask;
3483 struct rq *rq;
3484 int balance_cpu;
3485
Ingo Molnardd41f592007-07-09 18:51:59 +02003486 cpu_clear(this_cpu, cpus);
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003487 for_each_cpu_mask(balance_cpu, cpus) {
3488 /*
3489 * If this cpu gets work to do, stop the load balancing
3490 * work being done for other cpus. The next load
3491 * balancing owner will pick it up.
3492 */
3493 if (need_resched())
3494 break;
3495
Oleg Nesterovde0cf892007-08-12 18:08:19 +02003496 rebalance_domains(balance_cpu, CPU_IDLE);
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003497
3498 rq = cpu_rq(balance_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02003499 if (time_after(this_rq->next_balance, rq->next_balance))
3500 this_rq->next_balance = rq->next_balance;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003501 }
3502 }
3503#endif
3504}
3505
3506/*
3507 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
3508 *
3509 * In the CONFIG_NO_HZ case, this is the place where we nominate a new
3510 * idle load balancing owner or decide to stop the periodic load balancing,
3511 * if the whole system is idle.
3512 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003513static inline void trigger_load_balance(struct rq *rq, int cpu)
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003514{
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -07003515#ifdef CONFIG_NO_HZ
3516 /*
3517 * If we were in the nohz mode recently and busy at the current
3518 * scheduler tick, then check if we need to nominate a new idle
3519 * load balancer.
3520 */
3521 if (rq->in_nohz_recently && !rq->idle_at_tick) {
3522 rq->in_nohz_recently = 0;
3523
3524 if (atomic_read(&nohz.load_balancer) == cpu) {
3525 cpu_clear(cpu, nohz.cpu_mask);
3526 atomic_set(&nohz.load_balancer, -1);
3527 }
3528
3529 if (atomic_read(&nohz.load_balancer) == -1) {
3530 /*
3531 * simple selection for now: Nominate the
3532 * first cpu in the nohz list to be the next
3533 * ilb owner.
3534 *
3535 * TBD: Traverse the sched domains and nominate
3536 * the nearest cpu in the nohz.cpu_mask.
3537 */
3538 int ilb = first_cpu(nohz.cpu_mask);
3539
3540 if (ilb != NR_CPUS)
3541 resched_cpu(ilb);
3542 }
3543 }
3544
3545 /*
3546 * If this cpu is idle and doing idle load balancing for all the
3547 * cpus with ticks stopped, is it time for that to stop?
3548 */
3549 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
3550 cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
3551 resched_cpu(cpu);
3552 return;
3553 }
3554
3555 /*
3556 * If this cpu is idle and the idle load balancing is done by
3557 * someone else, then there is no need to raise the SCHED_SOFTIRQ.
3558 */
3559 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
3560 cpu_isset(cpu, nohz.cpu_mask))
3561 return;
3562#endif
3563 if (time_after_eq(jiffies, rq->next_balance))
3564 raise_softirq(SCHED_SOFTIRQ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565}
Ingo Molnardd41f592007-07-09 18:51:59 +02003566
3567#else /* CONFIG_SMP */
3568
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569/*
3570 * on UP we do not need to balance between CPUs:
3571 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07003572static inline void idle_balance(int cpu, struct rq *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573{
3574}
Ingo Molnardd41f592007-07-09 18:51:59 +02003575
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576#endif
3577
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578DEFINE_PER_CPU(struct kernel_stat, kstat);
3579
3580EXPORT_PER_CPU_SYMBOL(kstat);
3581
3582/*
Ingo Molnar41b86e92007-07-09 18:51:58 +02003583 * Return p->sum_exec_runtime plus any more ns on the sched_clock
3584 * that have not yet been banked in case the task is currently running.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585 */
Ingo Molnar41b86e92007-07-09 18:51:58 +02003586unsigned long long task_sched_runtime(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003588 unsigned long flags;
Ingo Molnar41b86e92007-07-09 18:51:58 +02003589 u64 ns, delta_exec;
3590 struct rq *rq;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003591
Ingo Molnar41b86e92007-07-09 18:51:58 +02003592 rq = task_rq_lock(p, &flags);
3593 ns = p->se.sum_exec_runtime;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01003594 if (task_current(rq, p)) {
Ingo Molnara8e504d2007-08-09 11:16:47 +02003595 update_rq_clock(rq);
3596 delta_exec = rq->clock - p->se.exec_start;
Ingo Molnar41b86e92007-07-09 18:51:58 +02003597 if ((s64)delta_exec > 0)
3598 ns += delta_exec;
3599 }
3600 task_rq_unlock(rq, &flags);
Ingo Molnar48f24c42006-07-03 00:25:40 -07003601
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 return ns;
3603}
3604
3605/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606 * Account user cpu time to a process.
3607 * @p: the process that the cpu time gets accounted to
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608 * @cputime: the cpu time spent in user space since the last update
3609 */
3610void account_user_time(struct task_struct *p, cputime_t cputime)
3611{
3612 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3613 cputime64_t tmp;
3614
3615 p->utime = cputime_add(p->utime, cputime);
3616
3617 /* Add user time to cpustat. */
3618 tmp = cputime_to_cputime64(cputime);
3619 if (TASK_NICE(p) > 0)
3620 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3621 else
3622 cpustat->user = cputime64_add(cpustat->user, tmp);
3623}
3624
3625/*
Laurent Vivier94886b82007-10-15 17:00:19 +02003626 * Account guest cpu time to a process.
3627 * @p: the process that the cpu time gets accounted to
3628 * @cputime: the cpu time spent in virtual machine since the last update
3629 */
Adrian Bunkf7402e02007-10-29 21:18:10 +01003630static void account_guest_time(struct task_struct *p, cputime_t cputime)
Laurent Vivier94886b82007-10-15 17:00:19 +02003631{
3632 cputime64_t tmp;
3633 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3634
3635 tmp = cputime_to_cputime64(cputime);
3636
3637 p->utime = cputime_add(p->utime, cputime);
3638 p->gtime = cputime_add(p->gtime, cputime);
3639
3640 cpustat->user = cputime64_add(cpustat->user, tmp);
3641 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3642}
3643
3644/*
Michael Neulingc66f08b2007-10-18 03:06:34 -07003645 * Account scaled user cpu time to a process.
3646 * @p: the process that the cpu time gets accounted to
3647 * @cputime: the cpu time spent in user space since the last update
3648 */
3649void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
3650{
3651 p->utimescaled = cputime_add(p->utimescaled, cputime);
3652}
3653
3654/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003655 * Account system cpu time to a process.
3656 * @p: the process that the cpu time gets accounted to
3657 * @hardirq_offset: the offset to subtract from hardirq_count()
3658 * @cputime: the cpu time spent in kernel space since the last update
3659 */
3660void account_system_time(struct task_struct *p, int hardirq_offset,
3661 cputime_t cputime)
3662{
3663 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Ingo Molnar70b97a72006-07-03 00:25:42 -07003664 struct rq *rq = this_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003665 cputime64_t tmp;
3666
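	/*
	 * Time spent running a guest vcpu (PF_VCPU) outside of hardirq
	 * context is accounted as guest time rather than system time.
	 */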
Christian Borntraeger97783852007-11-15 20:57:39 +01003667 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
3668 return account_guest_time(p, cputime);
Laurent Vivier94886b82007-10-15 17:00:19 +02003669
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 p->stime = cputime_add(p->stime, cputime);
3671
3672 /* Add system time to cpustat. */
3673 tmp = cputime_to_cputime64(cputime);
3674 if (hardirq_count() - hardirq_offset)
3675 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3676 else if (softirq_count())
3677 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
Andrew Mortoncfb52852007-11-14 16:59:45 -08003678 else if (p != rq->idle)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679 cpustat->system = cputime64_add(cpustat->system, tmp);
Andrew Mortoncfb52852007-11-14 16:59:45 -08003680 else if (atomic_read(&rq->nr_iowait) > 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003681 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
3682 else
3683 cpustat->idle = cputime64_add(cpustat->idle, tmp);
3684 /* Account for system time used */
3685 acct_update_integrals(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686}
3687
3688/*
Michael Neulingc66f08b2007-10-18 03:06:34 -07003689 * Account scaled system cpu time to a process.
3690 * @p: the process that the cpu time gets accounted to
3692 * @cputime: the cpu time spent in kernel space since the last update
3693 */
3694void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
3695{
3696 p->stimescaled = cputime_add(p->stimescaled, cputime);
3697}
3698
3699/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700 * Account for involuntary wait time.
3701 * @p: the process from which the cpu time has been stolen
3702 * @steal: the cpu time spent in involuntary wait
3703 */
3704void account_steal_time(struct task_struct *p, cputime_t steal)
3705{
3706 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3707 cputime64_t tmp = cputime_to_cputime64(steal);
Ingo Molnar70b97a72006-07-03 00:25:42 -07003708 struct rq *rq = this_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709
3710 if (p == rq->idle) {
3711 p->stime = cputime_add(p->stime, steal);
3712 if (atomic_read(&rq->nr_iowait) > 0)
3713 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
3714 else
3715 cpustat->idle = cputime64_add(cpustat->idle, tmp);
Andrew Mortoncfb52852007-11-14 16:59:45 -08003716 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 cpustat->steal = cputime64_add(cpustat->steal, tmp);
3718}
3719
Christoph Lameter7835b982006-12-10 02:20:22 -08003720/*
3721 * This function gets called by the timer code, with HZ frequency.
3722 * We call it with interrupts disabled.
3723 *
3724 * It also gets called by the fork code, when changing the parent's
3725 * timeslices.
3726 */
3727void scheduler_tick(void)
3728{
Christoph Lameter7835b982006-12-10 02:20:22 -08003729 int cpu = smp_processor_id();
3730 struct rq *rq = cpu_rq(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02003731 struct task_struct *curr = rq->curr;
Ingo Molnar529c7722007-08-10 23:05:11 +02003732 u64 next_tick = rq->tick_timestamp + TICK_NSEC;
Christoph Lameter7835b982006-12-10 02:20:22 -08003733
Ingo Molnardd41f592007-07-09 18:51:59 +02003734 spin_lock(&rq->lock);
Ingo Molnar546fe3c2007-08-09 11:16:51 +02003735 __update_rq_clock(rq);
Ingo Molnar529c7722007-08-10 23:05:11 +02003736 /*
3737 * Let rq->clock advance by at least TICK_NSEC:
3738 */
3739 if (unlikely(rq->clock < next_tick))
3740 rq->clock = next_tick;
3741 rq->tick_timestamp = rq->clock;
Ingo Molnarf1a438d2007-08-09 11:16:45 +02003742 update_cpu_load(rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01003743 curr->sched_class->task_tick(rq, curr, 0);
3744 update_sched_rt_period(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02003745 spin_unlock(&rq->lock);
3746
Christoph Lametere418e1c2006-12-10 02:20:23 -08003747#ifdef CONFIG_SMP
Ingo Molnardd41f592007-07-09 18:51:59 +02003748 rq->idle_at_tick = idle_cpu(cpu);
3749 trigger_load_balance(rq, cpu);
Christoph Lametere418e1c2006-12-10 02:20:23 -08003750#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751}
3752
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
3754
3755void fastcall add_preempt_count(int val)
3756{
3757 /*
3758 * Underflow?
3759 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003760 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3761 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 preempt_count() += val;
3763 /*
3764 * Spinlock count overflowing soon?
3765 */
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08003766 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3767 PREEMPT_MASK - 10);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768}
3769EXPORT_SYMBOL(add_preempt_count);
3770
3771void fastcall sub_preempt_count(int val)
3772{
3773 /*
3774 * Underflow?
3775 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003776 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3777 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778 /*
3779 * Is the spinlock portion underflowing?
3780 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003781 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3782 !(preempt_count() & PREEMPT_MASK)))
3783 return;
3784
Linus Torvalds1da177e2005-04-16 15:20:36 -07003785 preempt_count() -= val;
3786}
3787EXPORT_SYMBOL(sub_preempt_count);
3788
3789#endif
3790
3791/*
Ingo Molnardd41f592007-07-09 18:51:59 +02003792 * Print scheduling while atomic bug:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003794static noinline void __schedule_bug(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795{
Satyam Sharma838225b2007-10-24 18:23:50 +02003796 struct pt_regs *regs = get_irq_regs();
3797
3798 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3799 prev->comm, prev->pid, preempt_count());
3800
Ingo Molnardd41f592007-07-09 18:51:59 +02003801 debug_show_held_locks(prev);
3802 if (irqs_disabled())
3803 print_irqtrace_events(prev);
Satyam Sharma838225b2007-10-24 18:23:50 +02003804
3805 if (regs)
3806 show_regs(regs);
3807 else
3808 dump_stack();
Ingo Molnardd41f592007-07-09 18:51:59 +02003809}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810
Ingo Molnardd41f592007-07-09 18:51:59 +02003811/*
3812 * Various schedule()-time debugging checks and statistics:
3813 */
3814static inline void schedule_debug(struct task_struct *prev)
3815{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816 /*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01003817 * Test if we are atomic. Since do_exit() needs to call into
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818 * schedule() atomically, we ignore that path for now.
3819 * Otherwise, whine if we are scheduling when we should not be.
3820 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003821 if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
3822 __schedule_bug(prev);
3823
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3825
Ingo Molnar2d723762007-10-15 17:00:12 +02003826 schedstat_inc(this_rq(), sched_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02003827#ifdef CONFIG_SCHEDSTATS
3828 if (unlikely(prev->lock_depth >= 0)) {
Ingo Molnar2d723762007-10-15 17:00:12 +02003829 schedstat_inc(this_rq(), bkl_count);
3830 schedstat_inc(prev, sched_info.bkl_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02003831 }
3832#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02003833}
3834
3835/*
3836 * Pick up the highest-prio task:
3837 */
3838static inline struct task_struct *
Ingo Molnarff95f3d2007-08-09 11:16:49 +02003839pick_next_task(struct rq *rq, struct task_struct *prev)
Ingo Molnardd41f592007-07-09 18:51:59 +02003840{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02003841 const struct sched_class *class;
Ingo Molnardd41f592007-07-09 18:51:59 +02003842 struct task_struct *p;
3843
3844 /*
3845 * Optimization: we know that if all tasks are in
3846 * the fair class we can call that function directly:
3847 */
3848 if (likely(rq->nr_running == rq->cfs.nr_running)) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02003849 p = fair_sched_class.pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02003850 if (likely(p))
3851 return p;
3852 }
3853
3854 class = sched_class_highest;
3855 for ( ; ; ) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02003856 p = class->pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02003857 if (p)
3858 return p;
3859 /*
3860 * Will never be NULL as the idle class always
3861 * returns a non-NULL p:
3862 */
3863 class = class->next;
3864 }
3865}
3866
3867/*
3868 * schedule() is the main scheduler function.
3869 */
3870asmlinkage void __sched schedule(void)
3871{
3872 struct task_struct *prev, *next;
3873 long *switch_count;
3874 struct rq *rq;
Ingo Molnardd41f592007-07-09 18:51:59 +02003875 int cpu;
3876
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877need_resched:
3878 preempt_disable();
Ingo Molnardd41f592007-07-09 18:51:59 +02003879 cpu = smp_processor_id();
3880 rq = cpu_rq(cpu);
3881 rcu_qsctr_inc(cpu);
3882 prev = rq->curr;
3883 switch_count = &prev->nivcsw;
3884
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885 release_kernel_lock(prev);
3886need_resched_nonpreemptible:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887
Ingo Molnardd41f592007-07-09 18:51:59 +02003888 schedule_debug(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003890 hrtick_clear(rq);
3891
Ingo Molnar1e819952007-10-15 17:00:13 +02003892 /*
3893 * Do the rq-clock update outside the rq lock:
3894 */
3895 local_irq_disable();
Ingo Molnarc1b3da32007-08-09 11:16:47 +02003896 __update_rq_clock(rq);
Ingo Molnar1e819952007-10-15 17:00:13 +02003897 spin_lock(&rq->lock);
3898 clear_tsk_need_resched(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899
Ingo Molnardd41f592007-07-09 18:51:59 +02003900 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3901 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
3902 unlikely(signal_pending(prev)))) {
3903 prev->state = TASK_RUNNING;
3904 } else {
Ingo Molnar2e1cb742007-08-09 11:16:49 +02003905 deactivate_task(rq, prev, 1);
Ingo Molnardd41f592007-07-09 18:51:59 +02003906 }
3907 switch_count = &prev->nvcsw;
3908 }
3909
Steven Rostedt9a897c52008-01-25 21:08:22 +01003910#ifdef CONFIG_SMP
3911 if (prev->sched_class->pre_schedule)
3912 prev->sched_class->pre_schedule(rq, prev);
3913#endif
Steven Rostedtf65eda42008-01-25 21:08:07 +01003914
Ingo Molnardd41f592007-07-09 18:51:59 +02003915 if (unlikely(!rq->nr_running))
3916 idle_balance(cpu, rq);
3917
Ingo Molnar31ee5292007-08-09 11:16:49 +02003918 prev->sched_class->put_prev_task(rq, prev);
Ingo Molnarff95f3d2007-08-09 11:16:49 +02003919 next = pick_next_task(rq, prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920
3921 sched_info_switch(prev, next);
Ingo Molnardd41f592007-07-09 18:51:59 +02003922
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923 if (likely(prev != next)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924 rq->nr_switches++;
3925 rq->curr = next;
3926 ++*switch_count;
3927
Ingo Molnardd41f592007-07-09 18:51:59 +02003928 context_switch(rq, prev, next); /* unlocks the rq */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003929 /*
3930 * the context switch might have flipped the stack from under
3931 * us, hence refresh the local variables.
3932 */
3933 cpu = smp_processor_id();
3934 rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 } else
3936 spin_unlock_irq(&rq->lock);
3937
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003938 hrtick_set(rq);
3939
3940 if (unlikely(reacquire_kernel_lock(current) < 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 goto need_resched_nonpreemptible;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003942
Linus Torvalds1da177e2005-04-16 15:20:36 -07003943 preempt_enable_no_resched();
3944 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3945 goto need_resched;
3946}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947EXPORT_SYMBOL(schedule);
3948
3949#ifdef CONFIG_PREEMPT
3950/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07003951 * this is the entry point to schedule() from in-kernel preemption
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01003952 * off of preempt_enable. Kernel preemption off of return from interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953 * is handled by preempt_schedule_irq() below, which calls schedule() directly.
3954 */
3955asmlinkage void __sched preempt_schedule(void)
3956{
3957 struct thread_info *ti = current_thread_info();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958 struct task_struct *task = current;
3959 int saved_lock_depth;
Ingo Molnar6478d882008-01-25 21:08:33 +01003960
Linus Torvalds1da177e2005-04-16 15:20:36 -07003961 /*
3962 * If there is a non-zero preempt_count or interrupts are disabled,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01003963 * we do not want to preempt the current task. Just return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964 */
Nick Pigginbeed33a2006-10-11 01:21:52 -07003965 if (likely(ti->preempt_count || irqs_disabled()))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966 return;
3967
Andi Kleen3a5c3592007-10-15 17:00:14 +02003968 do {
3969 add_preempt_count(PREEMPT_ACTIVE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970
Andi Kleen3a5c3592007-10-15 17:00:14 +02003971 /*
3972 * We keep the big kernel semaphore locked, but we
 3973	 * clear ->lock_depth so that schedule() doesn't
3974 * auto-release the semaphore:
3975 */
Andi Kleen3a5c3592007-10-15 17:00:14 +02003976 saved_lock_depth = task->lock_depth;
3977 task->lock_depth = -1;
Andi Kleen3a5c3592007-10-15 17:00:14 +02003978 schedule();
Andi Kleen3a5c3592007-10-15 17:00:14 +02003979 task->lock_depth = saved_lock_depth;
Andi Kleen3a5c3592007-10-15 17:00:14 +02003980 sub_preempt_count(PREEMPT_ACTIVE);
3981
3982 /*
3983 * Check again in case we missed a preemption opportunity
3984 * between schedule and now.
3985 */
3986 barrier();
3987 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989EXPORT_SYMBOL(preempt_schedule);
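/*
 * For reference (a rough sketch, not a definition from this file): on
 * CONFIG_PREEMPT kernels preempt_enable() ends up doing approximately
 *
 *	preempt_enable_no_resched();
 *	barrier();
 *	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 *		preempt_schedule();
 *
 * which is how the function above gets invoked once the last preemption
 * count reference is dropped while a reschedule is pending.
 */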
3990
3991/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07003992 * this is the entry point to schedule() from kernel preemption
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993 * off of irq context.
 3994 * Note that this is called and returns with irqs disabled. This
 3995 * protects us against recursive calls from irq context.
3996 */
3997asmlinkage void __sched preempt_schedule_irq(void)
3998{
3999 struct thread_info *ti = current_thread_info();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000 struct task_struct *task = current;
4001 int saved_lock_depth;
Ingo Molnar6478d882008-01-25 21:08:33 +01004002
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004003 /* Catch callers which need to be fixed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004 BUG_ON(ti->preempt_count || !irqs_disabled());
4005
Andi Kleen3a5c3592007-10-15 17:00:14 +02004006 do {
4007 add_preempt_count(PREEMPT_ACTIVE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008
Andi Kleen3a5c3592007-10-15 17:00:14 +02004009 /*
4010 * We keep the big kernel semaphore locked, but we
 4011	 * clear ->lock_depth so that schedule() doesn't
4012 * auto-release the semaphore:
4013 */
Andi Kleen3a5c3592007-10-15 17:00:14 +02004014 saved_lock_depth = task->lock_depth;
4015 task->lock_depth = -1;
Andi Kleen3a5c3592007-10-15 17:00:14 +02004016 local_irq_enable();
4017 schedule();
4018 local_irq_disable();
Andi Kleen3a5c3592007-10-15 17:00:14 +02004019 task->lock_depth = saved_lock_depth;
Andi Kleen3a5c3592007-10-15 17:00:14 +02004020 sub_preempt_count(PREEMPT_ACTIVE);
4021
4022 /*
4023 * Check again in case we missed a preemption opportunity
4024 * between schedule and now.
4025 */
4026 barrier();
4027 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028}
4029
4030#endif /* CONFIG_PREEMPT */
4031
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004032int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
4033 void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004034{
Ingo Molnar48f24c42006-07-03 00:25:40 -07004035 return try_to_wake_up(curr->private, mode, sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037EXPORT_SYMBOL(default_wake_function);
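/*
 * Waiters may install their own callback instead of
 * default_wake_function(). Illustrative sketch only - my_wake_fn and
 * my_waitqueue are made-up names:
 *
 *	static int my_wake_fn(wait_queue_t *wait, unsigned mode,
 *			      int sync, void *key)
 *	{
 *		... custom bookkeeping ...
 *		return default_wake_function(wait, mode, sync, key);
 *	}
 *
 *	wait_queue_t my_wait;
 *
 *	init_waitqueue_func_entry(&my_wait, my_wake_fn);
 *	add_wait_queue(&my_waitqueue, &my_wait);
 */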
4038
4039/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004040 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4041 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042 * number) then we wake all the non-exclusive tasks and one exclusive task.
4043 *
4044 * There are circumstances in which we can try to wake a task which has already
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004045 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4047 */
4048static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
4049 int nr_exclusive, int sync, void *key)
4050{
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004051 wait_queue_t *curr, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004053 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
Ingo Molnar48f24c42006-07-03 00:25:40 -07004054 unsigned flags = curr->flags;
4055
Linus Torvalds1da177e2005-04-16 15:20:36 -07004056 if (curr->func(curr, mode, sync, key) &&
Ingo Molnar48f24c42006-07-03 00:25:40 -07004057 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058 break;
4059 }
4060}
4061
4062/**
4063 * __wake_up - wake up threads blocked on a waitqueue.
4064 * @q: the waitqueue
4065 * @mode: which threads
4066 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Martin Waitz67be2dd2005-05-01 08:59:26 -07004067 * @key: is directly passed to the wakeup function
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 */
4069void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004070 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071{
4072 unsigned long flags;
4073
4074 spin_lock_irqsave(&q->lock, flags);
4075 __wake_up_common(q, mode, nr_exclusive, 0, key);
4076 spin_unlock_irqrestore(&q->lock, flags);
4077}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078EXPORT_SYMBOL(__wake_up);
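/*
 * Typical producer/consumer use of the primitives above; illustrative
 * sketch only, the names are made up:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_cond;
 *
 *	waiter:
 *		wait_event_interruptible(my_wq, my_cond != 0);
 *
 *	waker:
 *		my_cond = 1;
 *		wake_up(&my_wq);	resolves to __wake_up() above
 */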
4079
4080/*
4081 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4082 */
4083void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4084{
4085 __wake_up_common(q, mode, 1, 0, NULL);
4086}
4087
4088/**
Martin Waitz67be2dd2005-05-01 08:59:26 -07004089 * __wake_up_sync - wake up threads blocked on a waitqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090 * @q: the waitqueue
4091 * @mode: which threads
4092 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4093 *
 4094 * The sync wakeup differs in that the waker knows that it will schedule
 4095 * away soon, so while the target thread will be woken up, it will not
 4096 * be migrated to another CPU - i.e. the two threads are 'synchronized'
4097 * with each other. This can prevent needless bouncing between CPUs.
4098 *
4099 * On UP it can prevent extra preemption.
4100 */
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004101void fastcall
4102__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103{
4104 unsigned long flags;
4105 int sync = 1;
4106
4107 if (unlikely(!q))
4108 return;
4109
4110 if (unlikely(!nr_exclusive))
4111 sync = 0;
4112
4113 spin_lock_irqsave(&q->lock, flags);
4114 __wake_up_common(q, mode, nr_exclusive, sync, NULL);
4115 spin_unlock_irqrestore(&q->lock, flags);
4116}
4117EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4118
Ingo Molnarb15136e2007-10-24 18:23:48 +02004119void complete(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120{
4121 unsigned long flags;
4122
4123 spin_lock_irqsave(&x->wait.lock, flags);
4124 x->done++;
4125 __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
4126 1, 0, NULL);
4127 spin_unlock_irqrestore(&x->wait.lock, flags);
4128}
4129EXPORT_SYMBOL(complete);
4130
Ingo Molnarb15136e2007-10-24 18:23:48 +02004131void complete_all(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132{
4133 unsigned long flags;
4134
4135 spin_lock_irqsave(&x->wait.lock, flags);
4136 x->done += UINT_MAX/2;
4137 __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
4138 0, 0, NULL);
4139 spin_unlock_irqrestore(&x->wait.lock, flags);
4140}
4141EXPORT_SYMBOL(complete_all);
4142
Andi Kleen8cbbe862007-10-15 17:00:14 +02004143static inline long __sched
4144do_wait_for_common(struct completion *x, long timeout, int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146 if (!x->done) {
4147 DECLARE_WAITQUEUE(wait, current);
4148
4149 wait.flags |= WQ_FLAG_EXCLUSIVE;
4150 __add_wait_queue_tail(&x->wait, &wait);
4151 do {
Andi Kleen8cbbe862007-10-15 17:00:14 +02004152 if (state == TASK_INTERRUPTIBLE &&
4153 signal_pending(current)) {
4154 __remove_wait_queue(&x->wait, &wait);
4155 return -ERESTARTSYS;
4156 }
4157 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004158 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004159 timeout = schedule_timeout(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004160 spin_lock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004161 if (!timeout) {
4162 __remove_wait_queue(&x->wait, &wait);
4163 return timeout;
4164 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165 } while (!x->done);
4166 __remove_wait_queue(&x->wait, &wait);
4167 }
4168 x->done--;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004169 return timeout;
4170}
4171
4172static long __sched
4173wait_for_common(struct completion *x, long timeout, int state)
4174{
4175 might_sleep();
4176
4177 spin_lock_irq(&x->wait.lock);
4178 timeout = do_wait_for_common(x, timeout, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004180 return timeout;
4181}
4182
Ingo Molnarb15136e2007-10-24 18:23:48 +02004183void __sched wait_for_completion(struct completion *x)
Andi Kleen8cbbe862007-10-15 17:00:14 +02004184{
4185 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186}
4187EXPORT_SYMBOL(wait_for_completion);
4188
Ingo Molnarb15136e2007-10-24 18:23:48 +02004189unsigned long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004190wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4191{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004192 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193}
4194EXPORT_SYMBOL(wait_for_completion_timeout);
4195
Andi Kleen8cbbe862007-10-15 17:00:14 +02004196int __sched wait_for_completion_interruptible(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197{
Andi Kleen51e97992007-10-18 21:32:55 +02004198 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4199 if (t == -ERESTARTSYS)
4200 return t;
4201 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202}
4203EXPORT_SYMBOL(wait_for_completion_interruptible);
4204
Ingo Molnarb15136e2007-10-24 18:23:48 +02004205unsigned long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004206wait_for_completion_interruptible_timeout(struct completion *x,
4207 unsigned long timeout)
4208{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004209 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210}
4211EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
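/*
 * Typical completion usage, tying the wait_for_completion*() family to
 * complete()/complete_all() above; illustrative sketch only, the names
 * are made up:
 *
 *	static DECLARE_COMPLETION(my_done);
 *
 *	waiter:
 *		wait_for_completion(&my_done);
 *	or, with a bound:
 *		if (!wait_for_completion_timeout(&my_done, HZ))
 *			... handle the one second timeout ...
 *
 *	signaller (another thread, or e.g. an interrupt handler):
 *		complete(&my_done);
 */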
4212
Andi Kleen8cbbe862007-10-15 17:00:14 +02004213static long __sched
4214sleep_on_common(wait_queue_head_t *q, int state, long timeout)
Ingo Molnar0fec1712007-07-09 18:52:01 +02004215{
4216 unsigned long flags;
4217 wait_queue_t wait;
4218
4219 init_waitqueue_entry(&wait, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220
Andi Kleen8cbbe862007-10-15 17:00:14 +02004221 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222
Andi Kleen8cbbe862007-10-15 17:00:14 +02004223 spin_lock_irqsave(&q->lock, flags);
4224 __add_wait_queue(q, &wait);
4225 spin_unlock(&q->lock);
4226 timeout = schedule_timeout(timeout);
4227 spin_lock_irq(&q->lock);
4228 __remove_wait_queue(q, &wait);
4229 spin_unlock_irqrestore(&q->lock, flags);
4230
4231 return timeout;
4232}
4233
4234void __sched interruptible_sleep_on(wait_queue_head_t *q)
4235{
4236 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238EXPORT_SYMBOL(interruptible_sleep_on);
4239
Ingo Molnar0fec1712007-07-09 18:52:01 +02004240long __sched
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004241interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004242{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004243 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4246
Ingo Molnar0fec1712007-07-09 18:52:01 +02004247void __sched sleep_on(wait_queue_head_t *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004249 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251EXPORT_SYMBOL(sleep_on);
4252
Ingo Molnar0fec1712007-07-09 18:52:01 +02004253long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004255 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257EXPORT_SYMBOL(sleep_on_timeout);
4258
Ingo Molnarb29739f2006-06-27 02:54:51 -07004259#ifdef CONFIG_RT_MUTEXES
4260
4261/*
4262 * rt_mutex_setprio - set the current priority of a task
4263 * @p: task
4264 * @prio: prio value (kernel-internal form)
4265 *
4266 * This function changes the 'effective' priority of a task. It does
4267 * not touch ->normal_prio like __setscheduler().
4268 *
4269 * Used by the rt_mutex code to implement priority inheritance logic.
4270 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004271void rt_mutex_setprio(struct task_struct *p, int prio)
Ingo Molnarb29739f2006-06-27 02:54:51 -07004272{
4273 unsigned long flags;
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004274 int oldprio, on_rq, running;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004275 struct rq *rq;
Steven Rostedtcb469842008-01-25 21:08:22 +01004276 const struct sched_class *prev_class = p->sched_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004277
4278 BUG_ON(prio < 0 || prio > MAX_PRIO);
4279
4280 rq = task_rq_lock(p, &flags);
Ingo Molnara8e504d2007-08-09 11:16:47 +02004281 update_rq_clock(rq);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004282
Andrew Mortond5f9f942007-05-08 20:27:06 -07004283 oldprio = p->prio;
Ingo Molnardd41f592007-07-09 18:51:59 +02004284 on_rq = p->se.on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01004285 running = task_current(rq, p);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004286 if (on_rq) {
Ingo Molnar69be72c2007-08-09 11:16:49 +02004287 dequeue_task(rq, p, 0);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004288 if (running)
4289 p->sched_class->put_prev_task(rq, p);
4290 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004291
4292 if (rt_prio(prio))
4293 p->sched_class = &rt_sched_class;
4294 else
4295 p->sched_class = &fair_sched_class;
4296
Ingo Molnarb29739f2006-06-27 02:54:51 -07004297 p->prio = prio;
4298
Ingo Molnardd41f592007-07-09 18:51:59 +02004299 if (on_rq) {
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004300 if (running)
4301 p->sched_class->set_curr_task(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01004302
Ingo Molnar8159f872007-08-09 11:16:49 +02004303 enqueue_task(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01004304
4305 check_class_changed(rq, p, prev_class, oldprio, running);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004306 }
4307 task_rq_unlock(rq, &flags);
4308}
4309
4310#endif
4311
Ingo Molnar36c8b582006-07-03 00:25:41 -07004312void set_user_nice(struct task_struct *p, long nice)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313{
Ingo Molnardd41f592007-07-09 18:51:59 +02004314 int old_prio, delta, on_rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004316 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004317
4318 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4319 return;
4320 /*
4321 * We have to be careful, if called from sys_setpriority(),
4322 * the task might be in the middle of scheduling on another CPU.
4323 */
4324 rq = task_rq_lock(p, &flags);
Ingo Molnara8e504d2007-08-09 11:16:47 +02004325 update_rq_clock(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326 /*
4327 * The RT priorities are set via sched_setscheduler(), but we still
4328 * allow the 'normal' nice value to be set - but as expected
 4329	 * it won't have any effect on scheduling until the task is
Ingo Molnardd41f592007-07-09 18:51:59 +02004330 * SCHED_FIFO/SCHED_RR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331 */
Ingo Molnare05606d2007-07-09 18:51:59 +02004332 if (task_has_rt_policy(p)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004333 p->static_prio = NICE_TO_PRIO(nice);
4334 goto out_unlock;
4335 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004336 on_rq = p->se.on_rq;
Srivatsa Vaddagiri58e2d4c2008-01-25 21:08:00 +01004337 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004338 dequeue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 p->static_prio = NICE_TO_PRIO(nice);
Peter Williams2dd73a42006-06-27 02:54:34 -07004341 set_load_weight(p);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004342 old_prio = p->prio;
4343 p->prio = effective_prio(p);
4344 delta = p->prio - old_prio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345
Ingo Molnardd41f592007-07-09 18:51:59 +02004346 if (on_rq) {
Ingo Molnar8159f872007-08-09 11:16:49 +02004347 enqueue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348 /*
Andrew Mortond5f9f942007-05-08 20:27:06 -07004349 * If the task increased its priority or is running and
4350 * lowered its priority, then reschedule its CPU:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351 */
Andrew Mortond5f9f942007-05-08 20:27:06 -07004352 if (delta < 0 || (delta > 0 && task_running(rq, p)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353 resched_task(rq->curr);
4354 }
4355out_unlock:
4356 task_rq_unlock(rq, &flags);
4357}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358EXPORT_SYMBOL(set_user_nice);
4359
Matt Mackalle43379f2005-05-01 08:59:00 -07004360/*
4361 * can_nice - check if a task can reduce its nice value
4362 * @p: task
4363 * @nice: nice value
4364 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004365int can_nice(const struct task_struct *p, const int nice)
Matt Mackalle43379f2005-05-01 08:59:00 -07004366{
Matt Mackall024f4742005-08-18 11:24:19 -07004367 /* convert nice value [19,-20] to rlimit style value [1,40] */
4368 int nice_rlim = 20 - nice;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004369
Matt Mackalle43379f2005-05-01 08:59:00 -07004370 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
4371 capable(CAP_SYS_NICE));
4372}
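/*
 * Worked example of the mapping above: nice 19 -> rlimit value 1,
 * nice 0 -> 20, nice -20 -> 40. So with RLIMIT_NICE set to 30 a task
 * may lower its nice value down to -10 (20 - (-10) == 30) without
 * needing CAP_SYS_NICE.
 */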
4373
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374#ifdef __ARCH_WANT_SYS_NICE
4375
4376/*
4377 * sys_nice - change the priority of the current process.
4378 * @increment: priority increment
4379 *
4380 * sys_setpriority is a more generic, but much slower function that
4381 * does similar things.
4382 */
4383asmlinkage long sys_nice(int increment)
4384{
Ingo Molnar48f24c42006-07-03 00:25:40 -07004385 long nice, retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004386
4387 /*
4388 * Setpriority might change our priority at the same moment.
4389 * We don't have to worry. Conceptually one call occurs first
4390 * and we have a single winner.
4391 */
Matt Mackalle43379f2005-05-01 08:59:00 -07004392 if (increment < -40)
4393 increment = -40;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394 if (increment > 40)
4395 increment = 40;
4396
4397 nice = PRIO_TO_NICE(current->static_prio) + increment;
4398 if (nice < -20)
4399 nice = -20;
4400 if (nice > 19)
4401 nice = 19;
4402
Matt Mackalle43379f2005-05-01 08:59:00 -07004403 if (increment < 0 && !can_nice(current, nice))
4404 return -EPERM;
4405
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406 retval = security_task_setnice(current, nice);
4407 if (retval)
4408 return retval;
4409
4410 set_user_nice(current, nice);
4411 return 0;
4412}
4413
4414#endif
4415
4416/**
4417 * task_prio - return the priority value of a given task.
4418 * @p: the task in question.
4419 *
4420 * This is the priority value as seen by users in /proc.
4421 * RT tasks are offset by -200. Normal tasks are centered
4422 * around 0, value goes from -16 to +15.
4423 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004424int task_prio(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425{
4426 return p->prio - MAX_RT_PRIO;
4427}
4428
4429/**
4430 * task_nice - return the nice value of a given task.
4431 * @p: the task in question.
4432 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004433int task_nice(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434{
4435 return TASK_NICE(p);
4436}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437EXPORT_SYMBOL_GPL(task_nice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438
4439/**
4440 * idle_cpu - is a given cpu idle currently?
4441 * @cpu: the processor in question.
4442 */
4443int idle_cpu(int cpu)
4444{
4445 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4446}
4447
Linus Torvalds1da177e2005-04-16 15:20:36 -07004448/**
4449 * idle_task - return the idle task for a given cpu.
4450 * @cpu: the processor in question.
4451 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004452struct task_struct *idle_task(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453{
4454 return cpu_rq(cpu)->idle;
4455}
4456
4457/**
4458 * find_process_by_pid - find a process with a matching PID value.
4459 * @pid: the pid in question.
4460 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02004461static struct task_struct *find_process_by_pid(pid_t pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462{
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07004463 return pid ? find_task_by_vpid(pid) : current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004464}
4465
4466/* Actually do priority change: must hold rq lock. */
Ingo Molnardd41f592007-07-09 18:51:59 +02004467static void
4468__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469{
Ingo Molnardd41f592007-07-09 18:51:59 +02004470 BUG_ON(p->se.on_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07004471
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472 p->policy = policy;
Ingo Molnardd41f592007-07-09 18:51:59 +02004473 switch (p->policy) {
4474 case SCHED_NORMAL:
4475 case SCHED_BATCH:
4476 case SCHED_IDLE:
4477 p->sched_class = &fair_sched_class;
4478 break;
4479 case SCHED_FIFO:
4480 case SCHED_RR:
4481 p->sched_class = &rt_sched_class;
4482 break;
4483 }
4484
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485 p->rt_priority = prio;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004486 p->normal_prio = normal_prio(p);
4487 /* we are holding p->pi_lock already */
4488 p->prio = rt_mutex_getprio(p);
Peter Williams2dd73a42006-06-27 02:54:34 -07004489 set_load_weight(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490}
4491
4492/**
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08004493 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494 * @p: the task in question.
4495 * @policy: new policy.
4496 * @param: structure containing the new RT priority.
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004497 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08004498 * NOTE that the task may already be dead.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499 */
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004500int sched_setscheduler(struct task_struct *p, int policy,
4501 struct sched_param *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004503 int retval, oldprio, oldpolicy = -1, on_rq, running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504 unsigned long flags;
Steven Rostedtcb469842008-01-25 21:08:22 +01004505 const struct sched_class *prev_class = p->sched_class;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004506 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507
Steven Rostedt66e53932006-06-27 02:54:44 -07004508 /* may grab non-irq protected spin_locks */
4509 BUG_ON(in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510recheck:
4511 /* double check policy once rq lock held */
4512 if (policy < 0)
4513 policy = oldpolicy = p->policy;
4514 else if (policy != SCHED_FIFO && policy != SCHED_RR &&
Ingo Molnardd41f592007-07-09 18:51:59 +02004515 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4516 policy != SCHED_IDLE)
Ingo Molnarb0a94992006-01-14 13:20:41 -08004517 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518 /*
4519 * Valid priorities for SCHED_FIFO and SCHED_RR are
Ingo Molnardd41f592007-07-09 18:51:59 +02004520 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4521 * SCHED_BATCH and SCHED_IDLE is 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004522 */
4523 if (param->sched_priority < 0 ||
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004524 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
Steven Rostedtd46523e2005-07-25 16:28:39 -04004525 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526 return -EINVAL;
Ingo Molnare05606d2007-07-09 18:51:59 +02004527 if (rt_policy(policy) != (param->sched_priority != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528 return -EINVAL;
4529
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004530 /*
4531 * Allow unprivileged RT tasks to decrease priority:
4532 */
4533 if (!capable(CAP_SYS_NICE)) {
Ingo Molnare05606d2007-07-09 18:51:59 +02004534 if (rt_policy(policy)) {
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004535 unsigned long rlim_rtprio;
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004536
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004537 if (!lock_task_sighand(p, &flags))
4538 return -ESRCH;
4539 rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
4540 unlock_task_sighand(p, &flags);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004541
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004542 /* can't set/change the rt policy */
4543 if (policy != p->policy && !rlim_rtprio)
4544 return -EPERM;
4545
4546 /* can't increase priority */
4547 if (param->sched_priority > p->rt_priority &&
4548 param->sched_priority > rlim_rtprio)
4549 return -EPERM;
4550 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004551 /*
 4552		 * Like positive nice levels, don't allow tasks to
4553 * move out of SCHED_IDLE either:
4554 */
4555 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
4556 return -EPERM;
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004557
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004558 /* can't change other user's priorities */
4559 if ((current->euid != p->euid) &&
4560 (current->euid != p->uid))
4561 return -EPERM;
4562 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004563
4564 retval = security_task_setscheduler(p, policy, param);
4565 if (retval)
4566 return retval;
4567 /*
Ingo Molnarb29739f2006-06-27 02:54:51 -07004568 * make sure no PI-waiters arrive (or leave) while we are
4569 * changing the priority of the task:
4570 */
4571 spin_lock_irqsave(&p->pi_lock, flags);
4572 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573	 * To be able to change p->policy safely, the appropriate
4574 * runqueue lock must be held.
4575 */
Ingo Molnarb29739f2006-06-27 02:54:51 -07004576 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004577 /* recheck policy now with rq lock held */
4578 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4579 policy = oldpolicy = -1;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004580 __task_rq_unlock(rq);
4581 spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582 goto recheck;
4583 }
Ingo Molnar2daa3572007-08-09 11:16:51 +02004584 update_rq_clock(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004585 on_rq = p->se.on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01004586 running = task_current(rq, p);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004587 if (on_rq) {
Ingo Molnar2e1cb742007-08-09 11:16:49 +02004588 deactivate_task(rq, p, 0);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004589 if (running)
4590 p->sched_class->put_prev_task(rq, p);
4591 }
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02004592
Linus Torvalds1da177e2005-04-16 15:20:36 -07004593 oldprio = p->prio;
Ingo Molnardd41f592007-07-09 18:51:59 +02004594 __setscheduler(rq, p, policy, param->sched_priority);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02004595
Ingo Molnardd41f592007-07-09 18:51:59 +02004596 if (on_rq) {
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004597 if (running)
4598 p->sched_class->set_curr_task(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01004599
Ingo Molnardd41f592007-07-09 18:51:59 +02004600 activate_task(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01004601
4602 check_class_changed(rq, p, prev_class, oldprio, running);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603 }
Ingo Molnarb29739f2006-06-27 02:54:51 -07004604 __task_rq_unlock(rq);
4605 spin_unlock_irqrestore(&p->pi_lock, flags);
4606
Thomas Gleixner95e02ca2006-06-27 02:55:02 -07004607 rt_mutex_adjust_pi(p);
4608
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609 return 0;
4610}
4611EXPORT_SYMBOL_GPL(sched_setscheduler);
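/*
 * In-kernel usage sketch (illustrative only, error handling omitted):
 * switch a kernel thread 'task' to the highest SCHED_FIFO priority.
 *
 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 *
 *	sched_setscheduler(task, SCHED_FIFO, &param);
 */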
4612
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004613static int
4614do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004616 struct sched_param lparam;
4617 struct task_struct *p;
Ingo Molnar36c8b582006-07-03 00:25:41 -07004618 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004619
4620 if (!param || pid < 0)
4621 return -EINVAL;
4622 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4623 return -EFAULT;
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004624
4625 rcu_read_lock();
4626 retval = -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 p = find_process_by_pid(pid);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004628 if (p != NULL)
4629 retval = sched_setscheduler(p, policy, &lparam);
4630 rcu_read_unlock();
Ingo Molnar36c8b582006-07-03 00:25:41 -07004631
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632 return retval;
4633}
4634
4635/**
4636 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4637 * @pid: the pid in question.
4638 * @policy: new policy.
4639 * @param: structure containing the new RT priority.
4640 */
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004641asmlinkage long
4642sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004643{
Jason Baronc21761f2006-01-18 17:43:03 -08004644 /* negative values for policy are not valid */
4645 if (policy < 0)
4646 return -EINVAL;
4647
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648 return do_sched_setscheduler(pid, policy, param);
4649}
4650
4651/**
4652 * sys_sched_setparam - set/change the RT priority of a thread
4653 * @pid: the pid in question.
4654 * @param: structure containing the new RT priority.
4655 */
4656asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
4657{
4658 return do_sched_setscheduler(pid, -1, param);
4659}
4660
4661/**
4662 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4663 * @pid: the pid in question.
4664 */
4665asmlinkage long sys_sched_getscheduler(pid_t pid)
4666{
Ingo Molnar36c8b582006-07-03 00:25:41 -07004667 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02004668 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669
4670 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02004671 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672
4673 retval = -ESRCH;
4674 read_lock(&tasklist_lock);
4675 p = find_process_by_pid(pid);
4676 if (p) {
4677 retval = security_task_getscheduler(p);
4678 if (!retval)
4679 retval = p->policy;
4680 }
4681 read_unlock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682 return retval;
4683}
4684
4685/**
 4686 * sys_sched_getparam - get the RT priority of a thread
4687 * @pid: the pid in question.
4688 * @param: structure containing the RT priority.
4689 */
4690asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
4691{
4692 struct sched_param lp;
Ingo Molnar36c8b582006-07-03 00:25:41 -07004693 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02004694 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004695
4696 if (!param || pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02004697 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004698
4699 read_lock(&tasklist_lock);
4700 p = find_process_by_pid(pid);
4701 retval = -ESRCH;
4702 if (!p)
4703 goto out_unlock;
4704
4705 retval = security_task_getscheduler(p);
4706 if (retval)
4707 goto out_unlock;
4708
4709 lp.sched_priority = p->rt_priority;
4710 read_unlock(&tasklist_lock);
4711
4712 /*
4713 * This one might sleep, we cannot do it with a spinlock held ...
4714 */
4715 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4716
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717 return retval;
4718
4719out_unlock:
4720 read_unlock(&tasklist_lock);
4721 return retval;
4722}
4723
4724long sched_setaffinity(pid_t pid, cpumask_t new_mask)
4725{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004726 cpumask_t cpus_allowed;
Ingo Molnar36c8b582006-07-03 00:25:41 -07004727 struct task_struct *p;
4728 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729
Gautham R Shenoy95402b32008-01-25 21:08:02 +01004730 get_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004731 read_lock(&tasklist_lock);
4732
4733 p = find_process_by_pid(pid);
4734 if (!p) {
4735 read_unlock(&tasklist_lock);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01004736 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004737 return -ESRCH;
4738 }
4739
4740 /*
4741 * It is not safe to call set_cpus_allowed with the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004742 * tasklist_lock held. We will bump the task_struct's
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743 * usage count and then drop tasklist_lock.
4744 */
4745 get_task_struct(p);
4746 read_unlock(&tasklist_lock);
4747
4748 retval = -EPERM;
4749 if ((current->euid != p->euid) && (current->euid != p->uid) &&
4750 !capable(CAP_SYS_NICE))
4751 goto out_unlock;
4752
David Quigleye7834f82006-06-23 02:03:59 -07004753 retval = security_task_setscheduler(p, 0, NULL);
4754 if (retval)
4755 goto out_unlock;
4756
Linus Torvalds1da177e2005-04-16 15:20:36 -07004757 cpus_allowed = cpuset_cpus_allowed(p);
4758 cpus_and(new_mask, new_mask, cpus_allowed);
Paul Menage8707d8b2007-10-18 23:40:22 -07004759 again:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004760 retval = set_cpus_allowed(p, new_mask);
4761
Paul Menage8707d8b2007-10-18 23:40:22 -07004762 if (!retval) {
4763 cpus_allowed = cpuset_cpus_allowed(p);
4764 if (!cpus_subset(new_mask, cpus_allowed)) {
4765 /*
4766 * We must have raced with a concurrent cpuset
4767 * update. Just reset the cpus_allowed to the
4768 * cpuset's cpus_allowed
4769 */
4770 new_mask = cpus_allowed;
4771 goto again;
4772 }
4773 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004774out_unlock:
4775 put_task_struct(p);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01004776 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004777 return retval;
4778}
4779
4780static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4781 cpumask_t *new_mask)
4782{
4783 if (len < sizeof(cpumask_t)) {
4784 memset(new_mask, 0, sizeof(cpumask_t));
4785 } else if (len > sizeof(cpumask_t)) {
4786 len = sizeof(cpumask_t);
4787 }
4788 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4789}
4790
4791/**
4792 * sys_sched_setaffinity - set the cpu affinity of a process
4793 * @pid: pid of the process
4794 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4795 * @user_mask_ptr: user-space pointer to the new cpu mask
4796 */
4797asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
4798 unsigned long __user *user_mask_ptr)
4799{
4800 cpumask_t new_mask;
4801 int retval;
4802
4803 retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
4804 if (retval)
4805 return retval;
4806
4807 return sched_setaffinity(pid, new_mask);
4808}
4809
4810/*
 4811 * Represents all CPUs present in the system.
 4812 * In systems capable of hotplug, this map could dynamically grow
 4813 * as new CPUs are detected in the system via any platform-specific
 4814 * method, e.g. ACPI.
4815 */
4816
Andi Kleen4cef0c62006-01-11 22:44:57 +01004817cpumask_t cpu_present_map __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004818EXPORT_SYMBOL(cpu_present_map);
4819
4820#ifndef CONFIG_SMP
Andi Kleen4cef0c62006-01-11 22:44:57 +01004821cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
Greg Bankse16b38f2006-10-02 02:17:40 -07004822EXPORT_SYMBOL(cpu_online_map);
4823
Andi Kleen4cef0c62006-01-11 22:44:57 +01004824cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
Greg Bankse16b38f2006-10-02 02:17:40 -07004825EXPORT_SYMBOL(cpu_possible_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004826#endif
4827
4828long sched_getaffinity(pid_t pid, cpumask_t *mask)
4829{
Ingo Molnar36c8b582006-07-03 00:25:41 -07004830 struct task_struct *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004832
Gautham R Shenoy95402b32008-01-25 21:08:02 +01004833 get_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834 read_lock(&tasklist_lock);
4835
4836 retval = -ESRCH;
4837 p = find_process_by_pid(pid);
4838 if (!p)
4839 goto out_unlock;
4840
David Quigleye7834f82006-06-23 02:03:59 -07004841 retval = security_task_getscheduler(p);
4842 if (retval)
4843 goto out_unlock;
4844
Jack Steiner2f7016d2006-02-01 03:05:18 -08004845 cpus_and(*mask, p->cpus_allowed, cpu_online_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004846
4847out_unlock:
4848 read_unlock(&tasklist_lock);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01004849 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004850
Ulrich Drepper9531b622007-08-09 11:16:46 +02004851 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004852}
4853
4854/**
4855 * sys_sched_getaffinity - get the cpu affinity of a process
4856 * @pid: pid of the process
4857 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4858 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4859 */
4860asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
4861 unsigned long __user *user_mask_ptr)
4862{
4863 int ret;
4864 cpumask_t mask;
4865
4866 if (len < sizeof(cpumask_t))
4867 return -EINVAL;
4868
4869 ret = sched_getaffinity(pid, &mask);
4870 if (ret < 0)
4871 return ret;
4872
4873 if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
4874 return -EFAULT;
4875
4876 return sizeof(cpumask_t);
4877}
4878
4879/**
4880 * sys_sched_yield - yield the current processor to other threads.
4881 *
Ingo Molnardd41f592007-07-09 18:51:59 +02004882 * This function yields the current CPU to other tasks. If there are no
4883 * other threads running on this CPU then this function will return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884 */
4885asmlinkage long sys_sched_yield(void)
4886{
Ingo Molnar70b97a72006-07-03 00:25:42 -07004887 struct rq *rq = this_rq_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004888
Ingo Molnar2d723762007-10-15 17:00:12 +02004889 schedstat_inc(rq, yld_count);
Dmitry Adamushko4530d7a2007-10-15 17:00:08 +02004890 current->sched_class->yield_task(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004891
4892 /*
4893 * Since we are going to call schedule() anyway, there's
4894 * no need to preempt or enable interrupts:
4895 */
4896 __release(rq->lock);
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07004897 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004898 _raw_spin_unlock(&rq->lock);
4899 preempt_enable_no_resched();
4900
4901 schedule();
4902
4903 return 0;
4904}
4905
Andrew Mortone7b38402006-06-30 01:56:00 -07004906static void __cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004907{
Ingo Molnar8e0a43d2006-06-23 02:05:23 -07004908#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
4909 __might_sleep(__FILE__, __LINE__);
4910#endif
Ingo Molnar5bbcfd92005-07-07 17:57:04 -07004911 /*
4912 * The BKS might be reacquired before we have dropped
4913 * PREEMPT_ACTIVE, which could trigger a second
4914 * cond_resched() call.
4915 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004916 do {
4917 add_preempt_count(PREEMPT_ACTIVE);
4918 schedule();
4919 sub_preempt_count(PREEMPT_ACTIVE);
4920 } while (need_resched());
4921}
4922
Herbert Xu02b67cc32008-01-25 21:08:28 +01004923#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
4924int __sched _cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004925{
Ingo Molnar94142322006-12-29 16:48:13 -08004926 if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
4927 system_state == SYSTEM_RUNNING) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004928 __cond_resched();
4929 return 1;
4930 }
4931 return 0;
4932}
Herbert Xu02b67cc32008-01-25 21:08:28 +01004933EXPORT_SYMBOL(_cond_resched);
4934#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004935
4936/*
4937 * cond_resched_lock() - if a reschedule is pending, drop the given lock,
4938 * call schedule, and on return reacquire the lock.
4939 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004940 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
Linus Torvalds1da177e2005-04-16 15:20:36 -07004941 * operations here to prevent schedule() from being called twice (once via
4942 * spin_unlock(), once by hand).
4943 */
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004944int cond_resched_lock(spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004945{
Jan Kara6df3cec2005-06-13 15:52:32 -07004946 int ret = 0;
4947
Linus Torvalds1da177e2005-04-16 15:20:36 -07004948 if (need_lockbreak(lock)) {
4949 spin_unlock(lock);
4950 cpu_relax();
Jan Kara6df3cec2005-06-13 15:52:32 -07004951 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004952 spin_lock(lock);
4953 }
Ingo Molnar94142322006-12-29 16:48:13 -08004954 if (need_resched() && system_state == SYSTEM_RUNNING) {
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07004955 spin_release(&lock->dep_map, 1, _THIS_IP_);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004956 _raw_spin_unlock(lock);
4957 preempt_enable_no_resched();
4958 __cond_resched();
Jan Kara6df3cec2005-06-13 15:52:32 -07004959 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004960 spin_lock(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004961 }
Jan Kara6df3cec2005-06-13 15:52:32 -07004962 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004963}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964EXPORT_SYMBOL(cond_resched_lock);
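/*
 * Usage sketch (illustrative only; more_work/do_one_chunk/my_lock are
 * made-up names): a long scan under a spinlock can give up the CPU
 * periodically without open-coding the unlock/schedule/lock dance.
 * Note that the lock may be dropped and retaken, so the caller must not
 * rely on state that is only stable while the lock is held.
 *
 *	spin_lock(&my_lock);
 *	while (more_work()) {
 *		do_one_chunk();
 *		cond_resched_lock(&my_lock);
 *	}
 *	spin_unlock(&my_lock);
 */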
4965
4966int __sched cond_resched_softirq(void)
4967{
4968 BUG_ON(!in_softirq());
4969
Ingo Molnar94142322006-12-29 16:48:13 -08004970 if (need_resched() && system_state == SYSTEM_RUNNING) {
Thomas Gleixner98d825672007-05-23 13:58:18 -07004971 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972 __cond_resched();
4973 local_bh_disable();
4974 return 1;
4975 }
4976 return 0;
4977}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004978EXPORT_SYMBOL(cond_resched_softirq);
4979
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980/**
4981 * yield - yield the current processor to other threads.
4982 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08004983 * This is a shortcut for kernel-space yielding - it marks the
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984 * thread runnable and calls sys_sched_yield().
4985 */
4986void __sched yield(void)
4987{
4988 set_current_state(TASK_RUNNING);
4989 sys_sched_yield();
4990}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004991EXPORT_SYMBOL(yield);
4992
4993/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004994 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
Linus Torvalds1da177e2005-04-16 15:20:36 -07004995 * that process accounting knows that this is a task in IO wait state.
4996 *
4997 * But don't do that if it is a deliberate, throttling IO wait (this task
4998 * has set its backing_dev_info: the queue against which it should throttle)
4999 */
5000void __sched io_schedule(void)
5001{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005002 struct rq *rq = &__raw_get_cpu_var(runqueues);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005004 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005005 atomic_inc(&rq->nr_iowait);
5006 schedule();
5007 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005008 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010EXPORT_SYMBOL(io_schedule);
5011
5012long __sched io_schedule_timeout(long timeout)
5013{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005014 struct rq *rq = &__raw_get_cpu_var(runqueues);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005015 long ret;
5016
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005017 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005018 atomic_inc(&rq->nr_iowait);
5019 ret = schedule_timeout(timeout);
5020 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005021 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005022 return ret;
5023}
5024
5025/**
5026 * sys_sched_get_priority_max - return maximum RT priority.
5027 * @policy: scheduling class.
5028 *
5029 * this syscall returns the maximum rt_priority that can be used
5030 * by a given scheduling class.
5031 */
5032asmlinkage long sys_sched_get_priority_max(int policy)
5033{
5034 int ret = -EINVAL;
5035
5036 switch (policy) {
5037 case SCHED_FIFO:
5038 case SCHED_RR:
5039 ret = MAX_USER_RT_PRIO-1;
5040 break;
5041 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005042 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005043 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005044 ret = 0;
5045 break;
5046 }
5047 return ret;
5048}
5049
5050/**
5051 * sys_sched_get_priority_min - return minimum RT priority.
5052 * @policy: scheduling class.
5053 *
5054 * this syscall returns the minimum rt_priority that can be used
5055 * by a given scheduling class.
5056 */
5057asmlinkage long sys_sched_get_priority_min(int policy)
5058{
5059 int ret = -EINVAL;
5060
5061 switch (policy) {
5062 case SCHED_FIFO:
5063 case SCHED_RR:
5064 ret = 1;
5065 break;
5066 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005067 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005068 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069 ret = 0;
5070 }
5071 return ret;
5072}
5073
5074/**
5075 * sys_sched_rr_get_interval - return the default timeslice of a process.
5076 * @pid: pid of the process.
5077 * @interval: userspace pointer to the timeslice value.
5078 *
5079 * this syscall writes the default timeslice value of a given process
5080 * into the user-space timespec buffer. A value of '0' means infinity.
5081 */
5082asmlinkage
5083long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
5084{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005085 struct task_struct *p;
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005086 unsigned int time_slice;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005087 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005088 struct timespec t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005089
5090 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005091 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005092
5093 retval = -ESRCH;
5094 read_lock(&tasklist_lock);
5095 p = find_process_by_pid(pid);
5096 if (!p)
5097 goto out_unlock;
5098
5099 retval = security_task_getscheduler(p);
5100 if (retval)
5101 goto out_unlock;
5102
Ingo Molnar77034932007-12-04 17:04:39 +01005103 /*
5104 * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
5105 * tasks that are on an otherwise idle runqueue:
5106 */
5107 time_slice = 0;
5108 if (p->policy == SCHED_RR) {
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005109 time_slice = DEF_TIMESLICE;
Ingo Molnar77034932007-12-04 17:04:39 +01005110 } else {
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005111 struct sched_entity *se = &p->se;
5112 unsigned long flags;
5113 struct rq *rq;
5114
5115 rq = task_rq_lock(p, &flags);
Ingo Molnar77034932007-12-04 17:04:39 +01005116 if (rq->cfs.load.weight)
5117 time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005118 task_rq_unlock(rq, &flags);
5119 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120 read_unlock(&tasklist_lock);
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005121 jiffies_to_timespec(time_slice, &t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005122 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005123 return retval;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005124
Linus Torvalds1da177e2005-04-16 15:20:36 -07005125out_unlock:
5126 read_unlock(&tasklist_lock);
5127 return retval;
5128}
5129
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005130static const char stat_nam[] = "RSDTtZX";
Ingo Molnar36c8b582006-07-03 00:25:41 -07005131
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005132void sched_show_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005134 unsigned long free = 0;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005135 unsigned state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005136
Linus Torvalds1da177e2005-04-16 15:20:36 -07005137 state = p->state ? __ffs(p->state) + 1 : 0;
Ingo Molnarcc4ea792007-10-18 21:32:56 +02005138 printk(KERN_INFO "%-13.13s %c", p->comm,
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005139 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
Ingo Molnar4bd77322007-07-11 21:21:47 +02005140#if BITS_PER_LONG == 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07005141 if (state == TASK_RUNNING)
Ingo Molnarcc4ea792007-10-18 21:32:56 +02005142 printk(KERN_CONT " running ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005143 else
Ingo Molnarcc4ea792007-10-18 21:32:56 +02005144 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145#else
5146 if (state == TASK_RUNNING)
Ingo Molnarcc4ea792007-10-18 21:32:56 +02005147 printk(KERN_CONT " running task ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148 else
Ingo Molnarcc4ea792007-10-18 21:32:56 +02005149 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150#endif
5151#ifdef CONFIG_DEBUG_STACK_USAGE
5152 {
Al Viro10ebffd2005-11-13 16:06:56 -08005153 unsigned long *n = end_of_stack(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154 while (!*n)
5155 n++;
Al Viro10ebffd2005-11-13 16:06:56 -08005156 free = (unsigned long)n - (unsigned long)end_of_stack(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157 }
5158#endif
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07005159 printk(KERN_CONT "%5lu %5d %6d\n", free,
Roland McGrathfcfd50a2008-01-09 00:03:23 -08005160 task_pid_nr(p), task_pid_nr(p->real_parent));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161
5162 if (state != TASK_RUNNING)
5163 show_stack(p, NULL);
5164}
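
/*
 * Example of the resulting line (illustrative values only; the saved PC
 * and the stack-free column depend on the architecture and on
 * CONFIG_DEBUG_STACK_USAGE):
 *
 *	bash          S ffffffff8022a61b  5368  2304   2301
 */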
5165
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005166void show_state_filter(unsigned long state_filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005168 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005169
Ingo Molnar4bd77322007-07-11 21:21:47 +02005170#if BITS_PER_LONG == 32
5171 printk(KERN_INFO
5172 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173#else
Ingo Molnar4bd77322007-07-11 21:21:47 +02005174 printk(KERN_INFO
5175 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005176#endif
5177 read_lock(&tasklist_lock);
5178 do_each_thread(g, p) {
5179 /*
5180 * reset the NMI-timeout, listing all tasks on a slow
5181 * console might take a lot of time:
5182 */
5183 touch_nmi_watchdog();
Ingo Molnar39bc89f2007-04-25 20:50:03 -07005184 if (!state_filter || (p->state & state_filter))
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005185 sched_show_task(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186 } while_each_thread(g, p);
5187
Jeremy Fitzhardinge04c91672007-05-08 00:28:05 -07005188 touch_all_softlockup_watchdogs();
5189
Ingo Molnardd41f592007-07-09 18:51:59 +02005190#ifdef CONFIG_SCHED_DEBUG
5191 sysrq_sched_debug_show();
5192#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005193 read_unlock(&tasklist_lock);
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005194 /*
5195 * Only show locks if all tasks are dumped:
5196 */
5197 if (state_filter == -1)
5198 debug_show_all_locks();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199}
5200
Ingo Molnar1df21052007-07-09 18:51:58 +02005201void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5202{
Ingo Molnardd41f592007-07-09 18:51:59 +02005203 idle->sched_class = &idle_sched_class;
Ingo Molnar1df21052007-07-09 18:51:58 +02005204}
5205
Ingo Molnarf340c0d2005-06-28 16:40:42 +02005206/**
5207 * init_idle - set up an idle thread for a given CPU
5208 * @idle: task in question
5209 * @cpu: cpu the idle task belongs to
5210 *
5211 * NOTE: this function does not set the idle thread's NEED_RESCHED
5212 * flag, to make booting more robust.
5213 */
Nick Piggin5c1e1762006-10-03 01:14:04 -07005214void __cpuinit init_idle(struct task_struct *idle, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005215{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005216 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005217 unsigned long flags;
5218
Ingo Molnardd41f592007-07-09 18:51:59 +02005219 __sched_fork(idle);
5220 idle->se.exec_start = sched_clock();
5221
Ingo Molnarb29739f2006-06-27 02:54:51 -07005222 idle->prio = idle->normal_prio = MAX_PRIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223 idle->cpus_allowed = cpumask_of_cpu(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005224 __set_task_cpu(idle, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225
5226 spin_lock_irqsave(&rq->lock, flags);
5227 rq->curr = rq->idle = idle;
Nick Piggin4866cde2005-06-25 14:57:23 -07005228#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
5229 idle->oncpu = 1;
5230#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231 spin_unlock_irqrestore(&rq->lock, flags);
5232
5233 /* Set the preempt count _outside_ the spinlocks! */
Al Viroa1261f52005-11-13 16:06:55 -08005234 task_thread_info(idle)->preempt_count = 0;
Ingo Molnar6478d882008-01-25 21:08:33 +01005235
Ingo Molnardd41f592007-07-09 18:51:59 +02005236 /*
5237 * The idle tasks have their own, simple scheduling class:
5238 */
5239 idle->sched_class = &idle_sched_class;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005240}
5241
5242/*
5243 * In a system that switches off the HZ timer nohz_cpu_mask
5244 * indicates which cpus entered this state. This is used
5245 * in the rcu update to wait only for active cpus. For systems
5246 * which do not switch off the HZ timer nohz_cpu_mask should
5247 * always be CPU_MASK_NONE.
5248 */
5249cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
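
/*
 * Sketch of the expected usage (the actual calls live in the tick/nohz
 * code, not here; shown only to make the contract above concrete):
 * a CPU sets its bit when it stops the tick and clears it on restart,
 * e.g. cpu_set(cpu, nohz_cpu_mask) / cpu_clear(cpu, nohz_cpu_mask).
 */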
5250
Ingo Molnar19978ca2007-11-09 22:39:38 +01005251/*
5252 * Increase the granularity value when there are more CPUs,
5253 * because with more CPUs the 'effective latency' as visible
5254 * to users decreases. But the relationship is not linear,
5255 * so pick a second-best guess by going with the log2 of the
5256 * number of CPUs.
5257 *
5258 * This idea comes from the SD scheduler of Con Kolivas:
5259 */
5260static inline void sched_init_granularity(void)
5261{
5262 unsigned int factor = 1 + ilog2(num_online_cpus());
5263 const unsigned long limit = 200000000;
5264
5265 sysctl_sched_min_granularity *= factor;
5266 if (sysctl_sched_min_granularity > limit)
5267 sysctl_sched_min_granularity = limit;
5268
5269 sysctl_sched_latency *= factor;
5270 if (sysctl_sched_latency > limit)
5271 sysctl_sched_latency = limit;
5272
5273 sysctl_sched_wakeup_granularity *= factor;
5274 sysctl_sched_batch_wakeup_granularity *= factor;
5275}
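
/*
 * Worked example (arithmetic only; the defaults are tunables and may
 * differ): with 8 online CPUs, factor = 1 + ilog2(8) = 4, so e.g. a
 * 20 ms sysctl_sched_latency becomes 80 ms, still below the 200 ms
 * clamp above; with 2 CPUs, factor = 2.
 */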
5276
Linus Torvalds1da177e2005-04-16 15:20:36 -07005277#ifdef CONFIG_SMP
5278/*
5279 * This is how migration works:
5280 *
Ingo Molnar70b97a72006-07-03 00:25:42 -07005281 * 1) we queue a struct migration_req structure in the source CPU's
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282 * runqueue and wake up that CPU's migration thread.
5283 * 2) we wait on the request's completion => thread blocks.
5284 * 3) migration thread wakes up (implicitly it forces the migrated
5285 * thread off the CPU)
5286 * 4) it gets the migration request and checks whether the migrated
5287 * task is still in the wrong runqueue.
5288 * 5) if it's in the wrong runqueue then the migration thread removes
5289 * it and puts it into the right queue.
5290 * 6) the migration thread signals the request's completion.
5291 * 7) we wake up and the migration is done.
5292 */
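
/*
 * For reference, the request carries roughly the following (see the
 * definition of struct migration_req earlier in this file; the field
 * names here are taken from their uses below):
 *
 *	struct migration_req {
 *		struct list_head list;		queued on rq->migration_queue
 *		struct task_struct *task;	task to move
 *		int dest_cpu;			target runqueue
 *		struct completion done;		signalled when migrated
 *	};
 */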
5293
5294/*
5295 * Change a given task's CPU affinity. Migrate the thread to a
5296 * proper CPU and schedule it away if the CPU it's executing on
5297 * is removed from the allowed bitmask.
5298 *
5299 * NOTE: the caller must have a valid reference to the task, the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005300 * task must not exit() & deallocate itself prematurely. The
Linus Torvalds1da177e2005-04-16 15:20:36 -07005301 * call is not atomic; no spinlocks may be held.
5302 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07005303int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005305 struct migration_req req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005306 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005307 struct rq *rq;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005308 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309
5310 rq = task_rq_lock(p, &flags);
5311 if (!cpus_intersects(new_mask, cpu_online_map)) {
5312 ret = -EINVAL;
5313 goto out;
5314 }
5315
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005316 if (p->sched_class->set_cpus_allowed)
5317 p->sched_class->set_cpus_allowed(p, &new_mask);
5318 else {
Ingo Molnar0eab9142008-01-25 21:08:19 +01005319 p->cpus_allowed = new_mask;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01005320 p->rt.nr_cpus_allowed = cpus_weight(new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005321 }
5322
Linus Torvalds1da177e2005-04-16 15:20:36 -07005323 /* Can the task run on the task's current CPU? If so, we're done */
5324 if (cpu_isset(task_cpu(p), new_mask))
5325 goto out;
5326
5327 if (migrate_task(p, any_online_cpu(new_mask), &req)) {
5328 /* Need help from migration thread: drop lock and wait. */
5329 task_rq_unlock(rq, &flags);
5330 wake_up_process(rq->migration_thread);
5331 wait_for_completion(&req.done);
5332 tlb_migrate_finish(p->mm);
5333 return 0;
5334 }
5335out:
5336 task_rq_unlock(rq, &flags);
Ingo Molnar48f24c42006-07-03 00:25:40 -07005337
Linus Torvalds1da177e2005-04-16 15:20:36 -07005338 return ret;
5339}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005340EXPORT_SYMBOL_GPL(set_cpus_allowed);
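
/*
 * Illustrative in-kernel usage sketch (hypothetical driver code, error
 * handling elided): restrict a kernel thread we created to one CPU.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(my_thread_fn, NULL, "mydrv/%d", cpu);
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed(tsk, cpumask_of_cpu(cpu));
 *		wake_up_process(tsk);
 *	}
 */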
5341
5342/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005343 * Move a (non-current) task off this cpu, onto the dest cpu. We're doing
Linus Torvalds1da177e2005-04-16 15:20:36 -07005344 * this because either it can't run here any more (set_cpus_allowed()
5345 * away from this CPU, or CPU going down), or because we're
5346 * attempting to rebalance this task on exec (sched_exec).
5347 *
5348 * So we race with normal scheduler movements, but that's OK, as long
5349 * as the task is no longer on this CPU.
Kirill Korotaevefc30812006-06-27 02:54:32 -07005350 *
5351 * Returns non-zero if task was successfully migrated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005352 */
Kirill Korotaevefc30812006-06-27 02:54:32 -07005353static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005354{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005355 struct rq *rq_dest, *rq_src;
Ingo Molnardd41f592007-07-09 18:51:59 +02005356 int ret = 0, on_rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005357
5358 if (unlikely(cpu_is_offline(dest_cpu)))
Kirill Korotaevefc30812006-06-27 02:54:32 -07005359 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005360
5361 rq_src = cpu_rq(src_cpu);
5362 rq_dest = cpu_rq(dest_cpu);
5363
5364 double_rq_lock(rq_src, rq_dest);
5365 /* Already moved. */
5366 if (task_cpu(p) != src_cpu)
5367 goto out;
5368 /* Affinity changed (again). */
5369 if (!cpu_isset(dest_cpu, p->cpus_allowed))
5370 goto out;
5371
Ingo Molnardd41f592007-07-09 18:51:59 +02005372 on_rq = p->se.on_rq;
Ingo Molnar6e82a3b2007-08-09 11:16:51 +02005373 if (on_rq)
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005374 deactivate_task(rq_src, p, 0);
Ingo Molnar6e82a3b2007-08-09 11:16:51 +02005375
Linus Torvalds1da177e2005-04-16 15:20:36 -07005376 set_task_cpu(p, dest_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005377 if (on_rq) {
5378 activate_task(rq_dest, p, 0);
5379 check_preempt_curr(rq_dest, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005380 }
Kirill Korotaevefc30812006-06-27 02:54:32 -07005381 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005382out:
5383 double_rq_unlock(rq_src, rq_dest);
Kirill Korotaevefc30812006-06-27 02:54:32 -07005384 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005385}
5386
5387/*
5388 * migration_thread - this is a highprio system thread that performs
5389 * thread migration by bumping thread off CPU then 'pushing' onto
5390 * another runqueue.
5391 */
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07005392static int migration_thread(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005393{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005394 int cpu = (long)data;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005395 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005396
5397 rq = cpu_rq(cpu);
5398 BUG_ON(rq->migration_thread != current);
5399
5400 set_current_state(TASK_INTERRUPTIBLE);
5401 while (!kthread_should_stop()) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07005402 struct migration_req *req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005403 struct list_head *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005404
Linus Torvalds1da177e2005-04-16 15:20:36 -07005405 spin_lock_irq(&rq->lock);
5406
5407 if (cpu_is_offline(cpu)) {
5408 spin_unlock_irq(&rq->lock);
5409 goto wait_to_die;
5410 }
5411
5412 if (rq->active_balance) {
5413 active_load_balance(rq, cpu);
5414 rq->active_balance = 0;
5415 }
5416
5417 head = &rq->migration_queue;
5418
5419 if (list_empty(head)) {
5420 spin_unlock_irq(&rq->lock);
5421 schedule();
5422 set_current_state(TASK_INTERRUPTIBLE);
5423 continue;
5424 }
Ingo Molnar70b97a72006-07-03 00:25:42 -07005425 req = list_entry(head->next, struct migration_req, list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005426 list_del_init(head->next);
5427
Nick Piggin674311d2005-06-25 14:57:27 -07005428 spin_unlock(&rq->lock);
5429 __migrate_task(req->task, cpu, req->dest_cpu);
5430 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005431
5432 complete(&req->done);
5433 }
5434 __set_current_state(TASK_RUNNING);
5435 return 0;
5436
5437wait_to_die:
5438 /* Wait for kthread_stop */
5439 set_current_state(TASK_INTERRUPTIBLE);
5440 while (!kthread_should_stop()) {
5441 schedule();
5442 set_current_state(TASK_INTERRUPTIBLE);
5443 }
5444 __set_current_state(TASK_RUNNING);
5445 return 0;
5446}
5447
5448#ifdef CONFIG_HOTPLUG_CPU
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07005449
5450static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
5451{
5452 int ret;
5453
5454 local_irq_disable();
5455 ret = __migrate_task(p, src_cpu, dest_cpu);
5456 local_irq_enable();
5457 return ret;
5458}
5459
Kirill Korotaev054b9102006-12-10 02:20:11 -08005460/*
Robert P. J. Day3a4fa0a2007-10-19 23:10:43 +02005461 * Figure out where a task on a dead CPU should go; use force if necessary.
Kirill Korotaev054b9102006-12-10 02:20:11 -08005462 * NOTE: interrupts should be disabled by the caller
5463 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07005464static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465{
Kirill Korotaevefc30812006-06-27 02:54:32 -07005466 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005467 cpumask_t mask;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005468 struct rq *rq;
5469 int dest_cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470
Andi Kleen3a5c3592007-10-15 17:00:14 +02005471 do {
5472 /* On same node? */
5473 mask = node_to_cpumask(cpu_to_node(dead_cpu));
5474 cpus_and(mask, mask, p->cpus_allowed);
5475 dest_cpu = any_online_cpu(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005476
Andi Kleen3a5c3592007-10-15 17:00:14 +02005477 /* On any allowed CPU? */
5478 if (dest_cpu == NR_CPUS)
5479 dest_cpu = any_online_cpu(p->cpus_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005480
Andi Kleen3a5c3592007-10-15 17:00:14 +02005481 /* No more Mr. Nice Guy. */
5482 if (dest_cpu == NR_CPUS) {
Cliff Wickman470fd642007-10-18 23:40:46 -07005483 cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p);
5484 /*
5485 * Try to stay on the same cpuset, where the
5486 * current cpuset may be a subset of all cpus.
5487 * The cpuset_cpus_allowed_locked() variant of
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005488 * cpuset_cpus_allowed() will not block. It must be
Cliff Wickman470fd642007-10-18 23:40:46 -07005489 * called within calls to cpuset_lock/cpuset_unlock.
5490 */
Andi Kleen3a5c3592007-10-15 17:00:14 +02005491 rq = task_rq_lock(p, &flags);
Cliff Wickman470fd642007-10-18 23:40:46 -07005492 p->cpus_allowed = cpus_allowed;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005493 dest_cpu = any_online_cpu(p->cpus_allowed);
5494 task_rq_unlock(rq, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005495
Andi Kleen3a5c3592007-10-15 17:00:14 +02005496 /*
5497 * Don't tell them about moving exiting tasks or
5498 * kernel threads (both mm NULL), since they never
5499 * leave kernel.
5500 */
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005501 if (p->mm && printk_ratelimit()) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02005502 printk(KERN_INFO "process %d (%s) no "
5503 "longer affine to cpu%d\n",
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005504 task_pid_nr(p), p->comm, dead_cpu);
5505 }
Andi Kleen3a5c3592007-10-15 17:00:14 +02005506 }
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07005507 } while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005508}
5509
5510/*
5511 * While a dead CPU has no uninterruptible tasks queued at this point,
5512 * it might still have a nonzero ->nr_uninterruptible counter, because
5513 * for performance reasons the counter is not strictly tracking tasks to
5514 * their home CPUs. So we just add the counter to another CPU's counter,
5515 * to keep the global sum constant after CPU-down:
5516 */
Ingo Molnar70b97a72006-07-03 00:25:42 -07005517static void migrate_nr_uninterruptible(struct rq *rq_src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005518{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005519 struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005520 unsigned long flags;
5521
5522 local_irq_save(flags);
5523 double_rq_lock(rq_src, rq_dest);
5524 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
5525 rq_src->nr_uninterruptible = 0;
5526 double_rq_unlock(rq_src, rq_dest);
5527 local_irq_restore(flags);
5528}
5529
5530/* Run through task list and migrate tasks from the dead cpu. */
5531static void migrate_live_tasks(int src_cpu)
5532{
Ingo Molnar48f24c42006-07-03 00:25:40 -07005533 struct task_struct *p, *t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005534
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07005535 read_lock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005536
Ingo Molnar48f24c42006-07-03 00:25:40 -07005537 do_each_thread(t, p) {
5538 if (p == current)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005539 continue;
5540
Ingo Molnar48f24c42006-07-03 00:25:40 -07005541 if (task_cpu(p) == src_cpu)
5542 move_task_off_dead_cpu(src_cpu, p);
5543 } while_each_thread(t, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005544
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07005545 read_unlock(&tasklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005546}
5547
Ingo Molnardd41f592007-07-09 18:51:59 +02005548/*
5549 * Schedules the idle task to be the next runnable task on the current CPU.
Dmitry Adamushko94bc9a72007-11-15 20:57:40 +01005550 * It does so by boosting its priority to the highest possible.
5551 * Used by CPU offline code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005552 */
5553void sched_idle_next(void)
5554{
Ingo Molnar48f24c42006-07-03 00:25:40 -07005555 int this_cpu = smp_processor_id();
Ingo Molnar70b97a72006-07-03 00:25:42 -07005556 struct rq *rq = cpu_rq(this_cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005557 struct task_struct *p = rq->idle;
5558 unsigned long flags;
5559
5560 /* cpu has to be offline */
Ingo Molnar48f24c42006-07-03 00:25:40 -07005561 BUG_ON(cpu_online(this_cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005562
Ingo Molnar48f24c42006-07-03 00:25:40 -07005563 /*
5564 * Strictly not necessary, since the rest of the CPUs are stopped by now
5565 * and interrupts are disabled on the current cpu.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005566 */
5567 spin_lock_irqsave(&rq->lock, flags);
5568
Ingo Molnardd41f592007-07-09 18:51:59 +02005569 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
Ingo Molnar48f24c42006-07-03 00:25:40 -07005570
Dmitry Adamushko94bc9a72007-11-15 20:57:40 +01005571 update_rq_clock(rq);
5572 activate_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005573
5574 spin_unlock_irqrestore(&rq->lock, flags);
5575}
5576
Ingo Molnar48f24c42006-07-03 00:25:40 -07005577/*
5578 * Ensures that the idle task is using init_mm right before its cpu goes
Linus Torvalds1da177e2005-04-16 15:20:36 -07005579 * offline.
5580 */
5581void idle_task_exit(void)
5582{
5583 struct mm_struct *mm = current->active_mm;
5584
5585 BUG_ON(cpu_online(smp_processor_id()));
5586
5587 if (mm != &init_mm)
5588 switch_mm(mm, &init_mm, current);
5589 mmdrop(mm);
5590}
5591
Kirill Korotaev054b9102006-12-10 02:20:11 -08005592/* called under rq->lock with disabled interrupts */
Ingo Molnar36c8b582006-07-03 00:25:41 -07005593static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005594{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005595 struct rq *rq = cpu_rq(dead_cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005596
5597 /* Must be exiting, otherwise would be on tasklist. */
Eugene Teo270f7222007-10-18 23:40:38 -07005598 BUG_ON(!p->exit_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005599
5600 /* Cannot have done final schedule yet: would have vanished. */
Oleg Nesterovc394cc92006-09-29 02:01:11 -07005601 BUG_ON(p->state == TASK_DEAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005602
Ingo Molnar48f24c42006-07-03 00:25:40 -07005603 get_task_struct(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005604
5605 /*
5606 * Drop lock around migration; if someone else moves it,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005607 * that's OK. No task can be added to this CPU, so iteration is
Linus Torvalds1da177e2005-04-16 15:20:36 -07005608 * fine.
5609 */
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07005610 spin_unlock_irq(&rq->lock);
Ingo Molnar48f24c42006-07-03 00:25:40 -07005611 move_task_off_dead_cpu(dead_cpu, p);
Oleg Nesterovf7b4cdd2007-10-16 23:30:56 -07005612 spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005613
Ingo Molnar48f24c42006-07-03 00:25:40 -07005614 put_task_struct(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005615}
5616
5617/* release_task() removes task from tasklist, so we won't find dead tasks. */
5618static void migrate_dead_tasks(unsigned int dead_cpu)
5619{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005620 struct rq *rq = cpu_rq(dead_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005621 struct task_struct *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005622
Ingo Molnardd41f592007-07-09 18:51:59 +02005623 for ( ; ; ) {
5624 if (!rq->nr_running)
5625 break;
Ingo Molnara8e504d2007-08-09 11:16:47 +02005626 update_rq_clock(rq);
Ingo Molnarff95f3d2007-08-09 11:16:49 +02005627 next = pick_next_task(rq, rq->curr);
Ingo Molnardd41f592007-07-09 18:51:59 +02005628 if (!next)
5629 break;
5630 migrate_dead(dead_cpu, next);
Nick Piggine692ab52007-07-26 13:40:43 +02005631
Linus Torvalds1da177e2005-04-16 15:20:36 -07005632 }
5633}
5634#endif /* CONFIG_HOTPLUG_CPU */
5635
Nick Piggine692ab52007-07-26 13:40:43 +02005636#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5637
5638static struct ctl_table sd_ctl_dir[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02005639 {
5640 .procname = "sched_domain",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005641 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02005642 },
Ingo Molnar38605ca2007-10-29 21:18:11 +01005643 {0, },
Nick Piggine692ab52007-07-26 13:40:43 +02005644};
5645
5646static struct ctl_table sd_ctl_root[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02005647 {
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005648 .ctl_name = CTL_KERN,
Alexey Dobriyane0361852007-08-09 11:16:46 +02005649 .procname = "kernel",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005650 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02005651 .child = sd_ctl_dir,
5652 },
Ingo Molnar38605ca2007-10-29 21:18:11 +01005653 {0, },
Nick Piggine692ab52007-07-26 13:40:43 +02005654};
5655
5656static struct ctl_table *sd_alloc_ctl_entry(int n)
5657{
5658 struct ctl_table *entry =
Milton Miller5cf9f062007-10-15 17:00:19 +02005659 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
Nick Piggine692ab52007-07-26 13:40:43 +02005660
Nick Piggine692ab52007-07-26 13:40:43 +02005661 return entry;
5662}
5663
Milton Miller6382bc92007-10-15 17:00:19 +02005664static void sd_free_ctl_entry(struct ctl_table **tablep)
5665{
Milton Millercd7900762007-10-17 16:55:11 +02005666 struct ctl_table *entry;
Milton Miller6382bc92007-10-15 17:00:19 +02005667
Milton Millercd7900762007-10-17 16:55:11 +02005668 /*
5669 * In the intermediate directories, both the child directory and
5670 * procname are dynamically allocated and could fail but the mode
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005671 * will always be set. In the lowest directory the names are
Milton Millercd7900762007-10-17 16:55:11 +02005672 * static strings and all have proc handlers.
5673 */
5674 for (entry = *tablep; entry->mode; entry++) {
Milton Miller6382bc92007-10-15 17:00:19 +02005675 if (entry->child)
5676 sd_free_ctl_entry(&entry->child);
Milton Millercd7900762007-10-17 16:55:11 +02005677 if (entry->proc_handler == NULL)
5678 kfree(entry->procname);
5679 }
Milton Miller6382bc92007-10-15 17:00:19 +02005680
5681 kfree(*tablep);
5682 *tablep = NULL;
5683}
5684
Nick Piggine692ab52007-07-26 13:40:43 +02005685static void
Alexey Dobriyane0361852007-08-09 11:16:46 +02005686set_table_entry(struct ctl_table *entry,
Nick Piggine692ab52007-07-26 13:40:43 +02005687 const char *procname, void *data, int maxlen,
5688 mode_t mode, proc_handler *proc_handler)
5689{
Nick Piggine692ab52007-07-26 13:40:43 +02005690 entry->procname = procname;
5691 entry->data = data;
5692 entry->maxlen = maxlen;
5693 entry->mode = mode;
5694 entry->proc_handler = proc_handler;
5695}
5696
5697static struct ctl_table *
5698sd_alloc_ctl_domain_table(struct sched_domain *sd)
5699{
Zou Nan haiace8b3d2007-10-15 17:00:14 +02005700 struct ctl_table *table = sd_alloc_ctl_entry(12);
Nick Piggine692ab52007-07-26 13:40:43 +02005701
Milton Millerad1cdc12007-10-15 17:00:19 +02005702 if (table == NULL)
5703 return NULL;
5704
Alexey Dobriyane0361852007-08-09 11:16:46 +02005705 set_table_entry(&table[0], "min_interval", &sd->min_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02005706 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005707 set_table_entry(&table[1], "max_interval", &sd->max_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02005708 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005709 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005710 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005711 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005712 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005713 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005714 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005715 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005716 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005717 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005718 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005719 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
Nick Piggine692ab52007-07-26 13:40:43 +02005720 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005721 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
Nick Piggine692ab52007-07-26 13:40:43 +02005722 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02005723 set_table_entry(&table[9], "cache_nice_tries",
Nick Piggine692ab52007-07-26 13:40:43 +02005724 &sd->cache_nice_tries,
5725 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02005726 set_table_entry(&table[10], "flags", &sd->flags,
Nick Piggine692ab52007-07-26 13:40:43 +02005727 sizeof(int), 0644, proc_dointvec_minmax);
Milton Miller6323469f2007-10-15 17:00:19 +02005728 /* &table[11] is terminator */
Nick Piggine692ab52007-07-26 13:40:43 +02005729
5730 return table;
5731}
5732
Ingo Molnar9a4e7152007-11-28 15:52:56 +01005733static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
Nick Piggine692ab52007-07-26 13:40:43 +02005734{
5735 struct ctl_table *entry, *table;
5736 struct sched_domain *sd;
5737 int domain_num = 0, i;
5738 char buf[32];
5739
5740 for_each_domain(cpu, sd)
5741 domain_num++;
5742 entry = table = sd_alloc_ctl_entry(domain_num + 1);
Milton Millerad1cdc12007-10-15 17:00:19 +02005743 if (table == NULL)
5744 return NULL;
Nick Piggine692ab52007-07-26 13:40:43 +02005745
5746 i = 0;
5747 for_each_domain(cpu, sd) {
5748 snprintf(buf, 32, "domain%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02005749 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005750 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02005751 entry->child = sd_alloc_ctl_domain_table(sd);
5752 entry++;
5753 i++;
5754 }
5755 return table;
5756}
5757
5758static struct ctl_table_header *sd_sysctl_header;
Milton Miller6382bc92007-10-15 17:00:19 +02005759static void register_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02005760{
5761 int i, cpu_num = num_online_cpus();
5762 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5763 char buf[32];
5764
Milton Miller73785472007-10-24 18:23:48 +02005765 WARN_ON(sd_ctl_dir[0].child);
5766 sd_ctl_dir[0].child = entry;
5767
Milton Millerad1cdc12007-10-15 17:00:19 +02005768 if (entry == NULL)
5769 return;
5770
Milton Miller97b6ea72007-10-15 17:00:19 +02005771 for_each_online_cpu(i) {
Nick Piggine692ab52007-07-26 13:40:43 +02005772 snprintf(buf, 32, "cpu%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02005773 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005774 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02005775 entry->child = sd_alloc_ctl_cpu_table(i);
Milton Miller97b6ea72007-10-15 17:00:19 +02005776 entry++;
Nick Piggine692ab52007-07-26 13:40:43 +02005777 }
Milton Miller73785472007-10-24 18:23:48 +02005778
5779 WARN_ON(sd_sysctl_header);
Nick Piggine692ab52007-07-26 13:40:43 +02005780 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5781}
Milton Miller6382bc92007-10-15 17:00:19 +02005782
Milton Miller73785472007-10-24 18:23:48 +02005783/* may be called multiple times per register */
Milton Miller6382bc92007-10-15 17:00:19 +02005784static void unregister_sched_domain_sysctl(void)
5785{
Milton Miller73785472007-10-24 18:23:48 +02005786 if (sd_sysctl_header)
5787 unregister_sysctl_table(sd_sysctl_header);
Milton Miller6382bc92007-10-15 17:00:19 +02005788 sd_sysctl_header = NULL;
Milton Miller73785472007-10-24 18:23:48 +02005789 if (sd_ctl_dir[0].child)
5790 sd_free_ctl_entry(&sd_ctl_dir[0].child);
Milton Miller6382bc92007-10-15 17:00:19 +02005791}
Nick Piggine692ab52007-07-26 13:40:43 +02005792#else
Milton Miller6382bc92007-10-15 17:00:19 +02005793static void register_sched_domain_sysctl(void)
5794{
5795}
5796static void unregister_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02005797{
5798}
5799#endif
5800
Linus Torvalds1da177e2005-04-16 15:20:36 -07005801/*
5802 * migration_call - callback that gets triggered when a CPU is added or removed.
5803 * Here we can start up the necessary migration thread for the new CPU.
5804 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07005805static int __cpuinit
5806migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005807{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005808 struct task_struct *p;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005809 int cpu = (long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005810 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005811 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005812
5813 switch (action) {
Gautham R Shenoy5be93612007-05-09 02:34:04 -07005814
Linus Torvalds1da177e2005-04-16 15:20:36 -07005815 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005816 case CPU_UP_PREPARE_FROZEN:
Ingo Molnardd41f592007-07-09 18:51:59 +02005817 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005818 if (IS_ERR(p))
5819 return NOTIFY_BAD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005820 kthread_bind(p, cpu);
5821 /* Must be high prio: stop_machine expects to yield to it. */
5822 rq = task_rq_lock(p, &flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02005823 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005824 task_rq_unlock(rq, &flags);
5825 cpu_rq(cpu)->migration_thread = p;
5826 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005827
Linus Torvalds1da177e2005-04-16 15:20:36 -07005828 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005829 case CPU_ONLINE_FROZEN:
Robert P. J. Day3a4fa0a2007-10-19 23:10:43 +02005830 /* Strictly unnecessary, as first user will wake it. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005831 wake_up_process(cpu_rq(cpu)->migration_thread);
Gregory Haskins57d885f2008-01-25 21:08:18 +01005832
5833 /* Update our root-domain */
5834 rq = cpu_rq(cpu);
5835 spin_lock_irqsave(&rq->lock, flags);
5836 if (rq->rd) {
5837 BUG_ON(!cpu_isset(cpu, rq->rd->span));
5838 cpu_set(cpu, rq->rd->online);
5839 }
5840 spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005841 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005842
Linus Torvalds1da177e2005-04-16 15:20:36 -07005843#ifdef CONFIG_HOTPLUG_CPU
5844 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005845 case CPU_UP_CANCELED_FROZEN:
Heiko Carstensfc75cdf2006-06-25 05:49:10 -07005846 if (!cpu_rq(cpu)->migration_thread)
5847 break;
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005848 /* Unbind it from offline cpu so it can run. Fall thru. */
Heiko Carstensa4c4af72005-11-07 00:58:38 -08005849 kthread_bind(cpu_rq(cpu)->migration_thread,
5850 any_online_cpu(cpu_online_map));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005851 kthread_stop(cpu_rq(cpu)->migration_thread);
5852 cpu_rq(cpu)->migration_thread = NULL;
5853 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005854
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005856 case CPU_DEAD_FROZEN:
Cliff Wickman470fd642007-10-18 23:40:46 -07005857 cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005858 migrate_live_tasks(cpu);
5859 rq = cpu_rq(cpu);
5860 kthread_stop(rq->migration_thread);
5861 rq->migration_thread = NULL;
5862 /* Idle task back to normal (off runqueue, low prio) */
Oleg Nesterovd2da2722007-10-16 23:30:56 -07005863 spin_lock_irq(&rq->lock);
Ingo Molnara8e504d2007-08-09 11:16:47 +02005864 update_rq_clock(rq);
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005865 deactivate_task(rq, rq->idle, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005866 rq->idle->static_prio = MAX_PRIO;
Ingo Molnardd41f592007-07-09 18:51:59 +02005867 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
5868 rq->idle->sched_class = &idle_sched_class;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005869 migrate_dead_tasks(cpu);
Oleg Nesterovd2da2722007-10-16 23:30:56 -07005870 spin_unlock_irq(&rq->lock);
Cliff Wickman470fd642007-10-18 23:40:46 -07005871 cpuset_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005872 migrate_nr_uninterruptible(rq);
5873 BUG_ON(rq->nr_running != 0);
5874
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005875 /*
5876 * No need to migrate the tasks: it was best-effort if
5877 * they didn't take sched_hotcpu_mutex. Just wake up
5878 * the requestors.
5879 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005880 spin_lock_irq(&rq->lock);
5881 while (!list_empty(&rq->migration_queue)) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07005882 struct migration_req *req;
5883
Linus Torvalds1da177e2005-04-16 15:20:36 -07005884 req = list_entry(rq->migration_queue.next,
Ingo Molnar70b97a72006-07-03 00:25:42 -07005885 struct migration_req, list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005886 list_del_init(&req->list);
5887 complete(&req->done);
5888 }
5889 spin_unlock_irq(&rq->lock);
5890 break;
Gregory Haskins57d885f2008-01-25 21:08:18 +01005891
5892 case CPU_DOWN_PREPARE:
5893 /* Update our root-domain */
5894 rq = cpu_rq(cpu);
5895 spin_lock_irqsave(&rq->lock, flags);
5896 if (rq->rd) {
5897 BUG_ON(!cpu_isset(cpu, rq->rd->span));
5898 cpu_clear(cpu, rq->rd->online);
5899 }
5900 spin_unlock_irqrestore(&rq->lock, flags);
5901 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005902#endif
5903 }
5904 return NOTIFY_OK;
5905}
5906
5907/* Register at highest priority so that task migration (migrate_live_tasks)
5908 * happens before everything else.
5909 */
Chandra Seetharaman26c21432006-06-27 02:54:10 -07005910static struct notifier_block __cpuinitdata migration_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005911 .notifier_call = migration_call,
5912 .priority = 10
5913};
5914
Adrian Bunke6fe6642007-11-09 22:39:39 +01005915void __init migration_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005916{
5917 void *cpu = (void *)(long)smp_processor_id();
Akinobu Mita07dccf32006-09-29 02:00:22 -07005918 int err;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005919
5920 /* Start one for the boot CPU: */
Akinobu Mita07dccf32006-09-29 02:00:22 -07005921 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5922 BUG_ON(err == NOTIFY_BAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005923 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5924 register_cpu_notifier(&migration_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005925}
5926#endif
5927
5928#ifdef CONFIG_SMP
Christoph Lameter476f3532007-05-06 14:48:58 -07005929
5930/* Number of possible processor ids */
5931int nr_cpu_ids __read_mostly = NR_CPUS;
5932EXPORT_SYMBOL(nr_cpu_ids);
5933
Ingo Molnar3e9830d2007-10-15 17:00:13 +02005934#ifdef CONFIG_SCHED_DEBUG
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005935
5936static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
5937{
5938 struct sched_group *group = sd->groups;
5939 cpumask_t groupmask;
5940 char str[NR_CPUS];
5941
5942 cpumask_scnprintf(str, NR_CPUS, sd->span);
5943 cpus_clear(groupmask);
5944
5945 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5946
5947 if (!(sd->flags & SD_LOAD_BALANCE)) {
5948 printk("does not load-balance\n");
5949 if (sd->parent)
5950 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5951 " has parent");
5952 return -1;
5953 }
5954
5955 printk(KERN_CONT "span %s\n", str);
5956
5957 if (!cpu_isset(cpu, sd->span)) {
5958 printk(KERN_ERR "ERROR: domain->span does not contain "
5959 "CPU%d\n", cpu);
5960 }
5961 if (!cpu_isset(cpu, group->cpumask)) {
5962 printk(KERN_ERR "ERROR: domain->groups does not contain"
5963 " CPU%d\n", cpu);
5964 }
5965
5966 printk(KERN_DEBUG "%*s groups:", level + 1, "");
5967 do {
5968 if (!group) {
5969 printk("\n");
5970 printk(KERN_ERR "ERROR: group is NULL\n");
5971 break;
5972 }
5973
5974 if (!group->__cpu_power) {
5975 printk(KERN_CONT "\n");
5976 printk(KERN_ERR "ERROR: domain->cpu_power not "
5977 "set\n");
5978 break;
5979 }
5980
5981 if (!cpus_weight(group->cpumask)) {
5982 printk(KERN_CONT "\n");
5983 printk(KERN_ERR "ERROR: empty group\n");
5984 break;
5985 }
5986
5987 if (cpus_intersects(groupmask, group->cpumask)) {
5988 printk(KERN_CONT "\n");
5989 printk(KERN_ERR "ERROR: repeated CPUs\n");
5990 break;
5991 }
5992
5993 cpus_or(groupmask, groupmask, group->cpumask);
5994
5995 cpumask_scnprintf(str, NR_CPUS, group->cpumask);
5996 printk(KERN_CONT " %s", str);
5997
5998 group = group->next;
5999 } while (group != sd->groups);
6000 printk(KERN_CONT "\n");
6001
6002 if (!cpus_equal(sd->span, groupmask))
6003 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
6004
6005 if (sd->parent && !cpus_subset(groupmask, sd->parent->span))
6006 printk(KERN_ERR "ERROR: parent span is not a superset "
6007 "of domain->span\n");
6008 return 0;
6009}
6010
Linus Torvalds1da177e2005-04-16 15:20:36 -07006011static void sched_domain_debug(struct sched_domain *sd, int cpu)
6012{
6013 int level = 0;
6014
Nick Piggin41c7ce92005-06-25 14:57:24 -07006015 if (!sd) {
6016 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6017 return;
6018 }
6019
Linus Torvalds1da177e2005-04-16 15:20:36 -07006020 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6021
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006022 for (;;) {
6023 if (sched_domain_debug_one(sd, cpu, level))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006024 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006025 level++;
6026 sd = sd->parent;
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08006027 if (!sd)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006028 break;
6029 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006030}
6031#else
Ingo Molnar48f24c42006-07-03 00:25:40 -07006032# define sched_domain_debug(sd, cpu) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006033#endif
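
/*
 * With CONFIG_SCHED_DEBUG the dump above produces dmesg output roughly
 * like the following (illustrative; cpumasks are printed as hex masks
 * and the exact domains depend on the machine's topology):
 *
 *	CPU0 attaching sched-domain:
 *	 domain 0: span 00000003
 *	  groups: 00000001 00000002
 *	  domain 1: span 0000000f
 *	   groups: 00000003 0000000c
 */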
6034
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006035static int sd_degenerate(struct sched_domain *sd)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006036{
6037 if (cpus_weight(sd->span) == 1)
6038 return 1;
6039
6040 /* Following flags need at least 2 groups */
6041 if (sd->flags & (SD_LOAD_BALANCE |
6042 SD_BALANCE_NEWIDLE |
6043 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006044 SD_BALANCE_EXEC |
6045 SD_SHARE_CPUPOWER |
6046 SD_SHARE_PKG_RESOURCES)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006047 if (sd->groups != sd->groups->next)
6048 return 0;
6049 }
6050
6051 /* Following flags don't use groups */
6052 if (sd->flags & (SD_WAKE_IDLE |
6053 SD_WAKE_AFFINE |
6054 SD_WAKE_BALANCE))
6055 return 0;
6056
6057 return 1;
6058}
6059
Ingo Molnar48f24c42006-07-03 00:25:40 -07006060static int
6061sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006062{
6063 unsigned long cflags = sd->flags, pflags = parent->flags;
6064
6065 if (sd_degenerate(parent))
6066 return 1;
6067
6068 if (!cpus_equal(sd->span, parent->span))
6069 return 0;
6070
6071 /* Does parent contain flags not in child? */
6072 /* WAKE_BALANCE is a subset of WAKE_AFFINE */
6073 if (cflags & SD_WAKE_AFFINE)
6074 pflags &= ~SD_WAKE_BALANCE;
6075 /* Flags needing groups don't count if only 1 group in parent */
6076 if (parent->groups == parent->groups->next) {
6077 pflags &= ~(SD_LOAD_BALANCE |
6078 SD_BALANCE_NEWIDLE |
6079 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006080 SD_BALANCE_EXEC |
6081 SD_SHARE_CPUPOWER |
6082 SD_SHARE_PKG_RESOURCES);
Suresh Siddha245af2c2005-06-25 14:57:25 -07006083 }
6084 if (~cflags & pflags)
6085 return 0;
6086
6087 return 1;
6088}
6089
Gregory Haskins57d885f2008-01-25 21:08:18 +01006090static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6091{
6092 unsigned long flags;
6093 const struct sched_class *class;
6094
6095 spin_lock_irqsave(&rq->lock, flags);
6096
6097 if (rq->rd) {
6098 struct root_domain *old_rd = rq->rd;
6099
Ingo Molnar0eab9142008-01-25 21:08:19 +01006100 for (class = sched_class_highest; class; class = class->next) {
Gregory Haskins57d885f2008-01-25 21:08:18 +01006101 if (class->leave_domain)
6102 class->leave_domain(rq);
Ingo Molnar0eab9142008-01-25 21:08:19 +01006103 }
Gregory Haskins57d885f2008-01-25 21:08:18 +01006104
Gregory Haskinsdc938522008-01-25 21:08:26 +01006105 cpu_clear(rq->cpu, old_rd->span);
6106 cpu_clear(rq->cpu, old_rd->online);
6107
Gregory Haskins57d885f2008-01-25 21:08:18 +01006108 if (atomic_dec_and_test(&old_rd->refcount))
6109 kfree(old_rd);
6110 }
6111
6112 atomic_inc(&rd->refcount);
6113 rq->rd = rd;
6114
Gregory Haskinsdc938522008-01-25 21:08:26 +01006115 cpu_set(rq->cpu, rd->span);
6116 if (cpu_isset(rq->cpu, cpu_online_map))
6117 cpu_set(rq->cpu, rd->online);
6118
Ingo Molnar0eab9142008-01-25 21:08:19 +01006119 for (class = sched_class_highest; class; class = class->next) {
Gregory Haskins57d885f2008-01-25 21:08:18 +01006120 if (class->join_domain)
6121 class->join_domain(rq);
Ingo Molnar0eab9142008-01-25 21:08:19 +01006122 }
Gregory Haskins57d885f2008-01-25 21:08:18 +01006123
6124 spin_unlock_irqrestore(&rq->lock, flags);
6125}
6126
Gregory Haskinsdc938522008-01-25 21:08:26 +01006127static void init_rootdomain(struct root_domain *rd)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006128{
6129 memset(rd, 0, sizeof(*rd));
6130
Gregory Haskinsdc938522008-01-25 21:08:26 +01006131 cpus_clear(rd->span);
6132 cpus_clear(rd->online);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006133}
6134
6135static void init_defrootdomain(void)
6136{
Gregory Haskinsdc938522008-01-25 21:08:26 +01006137 init_rootdomain(&def_root_domain);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006138 atomic_set(&def_root_domain.refcount, 1);
6139}
6140
Gregory Haskinsdc938522008-01-25 21:08:26 +01006141static struct root_domain *alloc_rootdomain(void)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006142{
6143 struct root_domain *rd;
6144
6145 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6146 if (!rd)
6147 return NULL;
6148
Gregory Haskinsdc938522008-01-25 21:08:26 +01006149 init_rootdomain(rd);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006150
6151 return rd;
6152}
6153
Linus Torvalds1da177e2005-04-16 15:20:36 -07006154/*
Ingo Molnar0eab9142008-01-25 21:08:19 +01006155 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
Linus Torvalds1da177e2005-04-16 15:20:36 -07006156 * hold the hotplug lock.
6157 */
Ingo Molnar0eab9142008-01-25 21:08:19 +01006158static void
6159cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006160{
Ingo Molnar70b97a72006-07-03 00:25:42 -07006161 struct rq *rq = cpu_rq(cpu);
Suresh Siddha245af2c2005-06-25 14:57:25 -07006162 struct sched_domain *tmp;
6163
6164 /* Remove the sched domains which do not contribute to scheduling. */
6165 for (tmp = sd; tmp; tmp = tmp->parent) {
6166 struct sched_domain *parent = tmp->parent;
6167 if (!parent)
6168 break;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006169 if (sd_parent_degenerate(tmp, parent)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006170 tmp->parent = parent->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006171 if (parent->parent)
6172 parent->parent->child = tmp;
6173 }
Suresh Siddha245af2c2005-06-25 14:57:25 -07006174 }
6175
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006176 if (sd && sd_degenerate(sd)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006177 sd = sd->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006178 if (sd)
6179 sd->child = NULL;
6180 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006181
6182 sched_domain_debug(sd, cpu);
6183
Gregory Haskins57d885f2008-01-25 21:08:18 +01006184 rq_attach_root(rq, rd);
Nick Piggin674311d2005-06-25 14:57:27 -07006185 rcu_assign_pointer(rq->sd, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006186}
6187
6188/* cpus with isolated domains */
Tim Chen67af63a2006-12-22 01:07:50 -08006189static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006190
6191/* Setup the mask of cpus configured for isolated domains */
6192static int __init isolated_cpu_setup(char *str)
6193{
6194 int ints[NR_CPUS], i;
6195
6196 str = get_options(str, ARRAY_SIZE(ints), ints);
6197 cpus_clear(cpu_isolated_map);
6198 for (i = 1; i <= ints[0]; i++)
6199 if (ints[i] < NR_CPUS)
6200 cpu_set(ints[i], cpu_isolated_map);
6201 return 1;
6202}
6203
Ingo Molnar8927f492007-10-15 17:00:13 +02006204__setup("isolcpus=", isolated_cpu_setup);
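
/*
 * Example: booting with "isolcpus=2,3" keeps CPUs 2 and 3 out of the
 * generic sched domains built below; tasks only run there when bound
 * explicitly (e.g. via sched_setaffinity()).
 */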
Linus Torvalds1da177e2005-04-16 15:20:36 -07006205
6206/*
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006207 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
6208 * to a function which identifies what group (along with its sched group) a CPU
6209 * belongs to. The return value of group_fn must be >= 0 and < NR_CPUS
6210 * (due to the fact that we keep track of groups covered with a cpumask_t).
Linus Torvalds1da177e2005-04-16 15:20:36 -07006211 *
6212 * init_sched_build_groups will build a circular linked list of the groups
6213 * covered by the given span, and will set each group's ->cpumask correctly,
6214 * and ->cpu_power to 0.
6215 */
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006216static void
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006217init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
6218 int (*group_fn)(int cpu, const cpumask_t *cpu_map,
6219 struct sched_group **sg))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006220{
6221 struct sched_group *first = NULL, *last = NULL;
6222 cpumask_t covered = CPU_MASK_NONE;
6223 int i;
6224
6225 for_each_cpu_mask(i, span) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006226 struct sched_group *sg;
6227 int group = group_fn(i, cpu_map, &sg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006228 int j;
6229
6230 if (cpu_isset(i, covered))
6231 continue;
6232
6233 sg->cpumask = CPU_MASK_NONE;
Eric Dumazet5517d862007-05-08 00:32:57 -07006234 sg->__cpu_power = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006235
6236 for_each_cpu_mask(j, span) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006237 if (group_fn(j, cpu_map, NULL) != group)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006238 continue;
6239
6240 cpu_set(j, covered);
6241 cpu_set(j, sg->cpumask);
6242 }
6243 if (!first)
6244 first = sg;
6245 if (last)
6246 last->next = sg;
6247 last = sg;
6248 }
6249 last->next = first;
6250}
6251
John Hawkes9c1cfda2005-09-06 15:18:14 -07006252#define SD_NODES_PER_DOMAIN 16
Linus Torvalds1da177e2005-04-16 15:20:36 -07006253
John Hawkes9c1cfda2005-09-06 15:18:14 -07006254#ifdef CONFIG_NUMA
akpm@osdl.org198e2f12006-01-12 01:05:30 -08006255
John Hawkes9c1cfda2005-09-06 15:18:14 -07006256/**
6257 * find_next_best_node - find the next node to include in a sched_domain
6258 * @node: node whose sched_domain we're building
6259 * @used_nodes: nodes already in the sched_domain
6260 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006261 * Find the next node to include in a given scheduling domain. Simply
John Hawkes9c1cfda2005-09-06 15:18:14 -07006262 * finds the closest node not already in the @used_nodes map.
6263 *
6264 * Should use nodemask_t.
6265 */
6266static int find_next_best_node(int node, unsigned long *used_nodes)
6267{
6268 int i, n, val, min_val, best_node = 0;
6269
6270 min_val = INT_MAX;
6271
6272 for (i = 0; i < MAX_NUMNODES; i++) {
6273 /* Start at @node */
6274 n = (node + i) % MAX_NUMNODES;
6275
6276 if (!nr_cpus_node(n))
6277 continue;
6278
6279 /* Skip already used nodes */
6280 if (test_bit(n, used_nodes))
6281 continue;
6282
6283 /* Simple min distance search */
6284 val = node_distance(node, n);
6285
6286 if (val < min_val) {
6287 min_val = val;
6288 best_node = n;
6289 }
6290 }
6291
6292 set_bit(best_node, used_nodes);
6293 return best_node;
6294}
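
/*
 * Example (illustrative distances, all nodes assumed to have CPUs):
 * on a 4-node box with node_distance(0, n) = 10, 20, 20, 30 for
 * n = 0..3 and node 0 already marked in @used_nodes by the caller,
 * successive calls return node 1, then node 2, then node 3, marking
 * each one as it is picked.
 */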
6295
6296/**
6297 * sched_domain_node_span - get a cpumask for a node's sched_domain
6298 * @node: node whose cpumask we're constructing
6299 * @size: number of nodes to include in this span
6300 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006301 * Given a node, construct a good cpumask for its sched_domain to span. It
John Hawkes9c1cfda2005-09-06 15:18:14 -07006302 * should be one that prevents unnecessary balancing, but also spreads tasks
6303 * out optimally.
6304 */
6305static cpumask_t sched_domain_node_span(int node)
6306{
John Hawkes9c1cfda2005-09-06 15:18:14 -07006307 DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006308 cpumask_t span, nodemask;
6309 int i;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006310
6311 cpus_clear(span);
6312 bitmap_zero(used_nodes, MAX_NUMNODES);
6313
6314 nodemask = node_to_cpumask(node);
6315 cpus_or(span, span, nodemask);
6316 set_bit(node, used_nodes);
6317
6318 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
6319 int next_node = find_next_best_node(node, used_nodes);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006320
John Hawkes9c1cfda2005-09-06 15:18:14 -07006321 nodemask = node_to_cpumask(next_node);
6322 cpus_or(span, span, nodemask);
6323 }
6324
6325 return span;
6326}
6327#endif
6328
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006329int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006330
John Hawkes9c1cfda2005-09-06 15:18:14 -07006331/*
Ingo Molnar48f24c42006-07-03 00:25:40 -07006332 * SMT sched-domains:
John Hawkes9c1cfda2005-09-06 15:18:14 -07006333 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006334#ifdef CONFIG_SCHED_SMT
6335static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006336static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006337
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006338static int
6339cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006340{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006341 if (sg)
6342 *sg = &per_cpu(sched_group_cpus, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006343 return cpu;
6344}
6345#endif
6346
Ingo Molnar48f24c42006-07-03 00:25:40 -07006347/*
6348 * multi-core sched-domains:
6349 */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006350#ifdef CONFIG_SCHED_MC
6351static DEFINE_PER_CPU(struct sched_domain, core_domains);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006352static DEFINE_PER_CPU(struct sched_group, sched_group_core);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006353#endif
6354
6355#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006356static int
6357cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006358{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006359 int group;
Mike Travisd5a74302007-10-16 01:24:05 -07006360 cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006361 cpus_and(mask, mask, *cpu_map);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006362 group = first_cpu(mask);
6363 if (sg)
6364 *sg = &per_cpu(sched_group_core, group);
6365 return group;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006366}
6367#elif defined(CONFIG_SCHED_MC)
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006368static int
6369cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006370{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006371 if (sg)
6372 *sg = &per_cpu(sched_group_core, cpu);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006373 return cpu;
6374}
6375#endif
6376
Linus Torvalds1da177e2005-04-16 15:20:36 -07006377static DEFINE_PER_CPU(struct sched_domain, phys_domains);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006378static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006379
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006380static int
6381cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006383 int group;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006384#ifdef CONFIG_SCHED_MC
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006385 cpumask_t mask = cpu_coregroup_map(cpu);
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006386 cpus_and(mask, mask, *cpu_map);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006387 group = first_cpu(mask);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006388#elif defined(CONFIG_SCHED_SMT)
Mike Travisd5a74302007-10-16 01:24:05 -07006389 cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006390 cpus_and(mask, mask, *cpu_map);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006391 group = first_cpu(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006392#else
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006393 group = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006394#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006395 if (sg)
6396 *sg = &per_cpu(sched_group_phys, group);
6397 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006398}
6399
6400#ifdef CONFIG_NUMA
John Hawkes9c1cfda2005-09-06 15:18:14 -07006401/*
6402 * The init_sched_build_groups can't handle what we want to do with node
6403 * groups, so roll our own. Now each node has its own list of groups which
6404 * gets dynamically allocated.
6405 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006406static DEFINE_PER_CPU(struct sched_domain, node_domains);
John Hawkesd1b55132005-09-06 15:18:14 -07006407static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
John Hawkes9c1cfda2005-09-06 15:18:14 -07006408
6409static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006410static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006411
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006412static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
6413 struct sched_group **sg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006414{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006415 cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
6416 int group;
6417
6418 cpus_and(nodemask, nodemask, *cpu_map);
6419 group = first_cpu(nodemask);
6420
6421 if (sg)
6422 *sg = &per_cpu(sched_group_allnodes, group);
6423 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006424}
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006425
Siddha, Suresh B08069032006-03-27 01:15:23 -08006426static void init_numa_sched_groups_power(struct sched_group *group_head)
6427{
6428 struct sched_group *sg = group_head;
6429 int j;
6430
6431 if (!sg)
6432 return;
Andi Kleen3a5c3592007-10-15 17:00:14 +02006433 do {
6434 for_each_cpu_mask(j, sg->cpumask) {
6435 struct sched_domain *sd;
Siddha, Suresh B08069032006-03-27 01:15:23 -08006436
Andi Kleen3a5c3592007-10-15 17:00:14 +02006437 sd = &per_cpu(phys_domains, j);
6438 if (j != first_cpu(sd->groups->cpumask)) {
6439 /*
6440 * Only add "power" once for each
6441 * physical package.
6442 */
6443 continue;
6444 }
6445
6446 sg_inc_cpu_power(sg, sd->groups->__cpu_power);
Siddha, Suresh B08069032006-03-27 01:15:23 -08006447 }
Andi Kleen3a5c3592007-10-15 17:00:14 +02006448 sg = sg->next;
6449 } while (sg != group_head);
Siddha, Suresh B08069032006-03-27 01:15:23 -08006450}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006451#endif
6452
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006453#ifdef CONFIG_NUMA
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006454/* Free memory allocated for various sched_group structures */
6455static void free_sched_groups(const cpumask_t *cpu_map)
6456{
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006457 int cpu, i;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006458
6459 for_each_cpu_mask(cpu, *cpu_map) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006460 struct sched_group **sched_group_nodes
6461 = sched_group_nodes_bycpu[cpu];
6462
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006463 if (!sched_group_nodes)
6464 continue;
6465
6466 for (i = 0; i < MAX_NUMNODES; i++) {
6467 cpumask_t nodemask = node_to_cpumask(i);
6468 struct sched_group *oldsg, *sg = sched_group_nodes[i];
6469
6470 cpus_and(nodemask, nodemask, *cpu_map);
6471 if (cpus_empty(nodemask))
6472 continue;
6473
6474 if (sg == NULL)
6475 continue;
6476 sg = sg->next;
6477next_sg:
6478 oldsg = sg;
6479 sg = sg->next;
6480 kfree(oldsg);
6481 if (oldsg != sched_group_nodes[i])
6482 goto next_sg;
6483 }
6484 kfree(sched_group_nodes);
6485 sched_group_nodes_bycpu[cpu] = NULL;
6486 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006487}
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006488#else
6489static void free_sched_groups(const cpumask_t *cpu_map)
6490{
6491}
6492#endif
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006493
Linus Torvalds1da177e2005-04-16 15:20:36 -07006494/*
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006495 * Initialize sched groups cpu_power.
6496 *
6497 * cpu_power indicates the capacity of a sched group, which is used while
6498 * distributing the load between different sched groups in a sched domain.
6499 * Typically cpu_power for all the groups in a sched domain will be the same unless
6500 * there are asymmetries in the topology. If there are asymmetries, the group
6501 * having more cpu_power will pick up more load compared to the group having
6502 * less cpu_power.
6503 *
6504 * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
6505 * the maximum number of tasks a group can handle in the presence of other idle
6506 * or lightly loaded groups in the same sched domain.
6507 */
6508static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6509{
6510 struct sched_domain *child;
6511 struct sched_group *group;
6512
6513 WARN_ON(!sd || !sd->groups);
6514
6515 if (cpu != first_cpu(sd->groups->cpumask))
6516 return;
6517
6518 child = sd->child;
6519
Eric Dumazet5517d862007-05-08 00:32:57 -07006520 sd->groups->__cpu_power = 0;
6521
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006522 /*
6523 * For the perf policy, if the groups in the child domain share resources
6524 * (for example cores sharing some portions of the cache hierarchy
6525 * or SMT), then set this domain's groups' cpu_power such that each group
6526 * can handle only one task, when there are other idle groups in the
6527 * same sched domain.
6528 */
6529 if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
6530 (child->flags &
6531 (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
Eric Dumazet5517d862007-05-08 00:32:57 -07006532 sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006533 return;
6534 }
6535
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006536 /*
6537 * add cpu_power of each child group to this group's cpu_power
6538 */
6539 group = child->groups;
6540 do {
Eric Dumazet5517d862007-05-08 00:32:57 -07006541 sg_inc_cpu_power(sd->groups, group->__cpu_power);
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006542 group = group->next;
6543 } while (group != child->groups);
6544}
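/*
 * Worked example (added for illustration, assuming SCHED_LOAD_SCALE == 1024):
 * consider a dual-core, non-SMT package.  Each MC-level group has no child
 * domain, so it gets __cpu_power = 1024.  For the physical domain above it,
 * the MC child has SD_SHARE_PKG_RESOURCES set, so with power-savings balance
 * disabled the early-return path caps the physical group at 1024 (one task's
 * worth).  With SD_POWERSAVINGS_BALANCE set instead, the summation loop runs
 * and the physical group advertises 1024 + 1024 = 2048, i.e. room for two
 * tasks before it looks busier than an idle sibling package.
 */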
6545
6546/*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006547 * Build sched domains for a given set of cpus and attach the sched domains
6548 * to the individual cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07006549 */
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006550static int build_sched_domains(const cpumask_t *cpu_map)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006551{
6552 int i;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006553 struct root_domain *rd;
John Hawkesd1b55132005-09-06 15:18:14 -07006554#ifdef CONFIG_NUMA
6555 struct sched_group **sched_group_nodes = NULL;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006556 int sd_allnodes = 0;
John Hawkesd1b55132005-09-06 15:18:14 -07006557
6558 /*
6559 * Allocate the per-node list of sched groups
6560 */
Milton Miller5cf9f062007-10-15 17:00:19 +02006561 sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006562 GFP_KERNEL);
John Hawkesd1b55132005-09-06 15:18:14 -07006563 if (!sched_group_nodes) {
6564 printk(KERN_WARNING "Can not alloc sched group node list\n");
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006565 return -ENOMEM;
John Hawkesd1b55132005-09-06 15:18:14 -07006566 }
6567 sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
6568#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006569
Gregory Haskinsdc938522008-01-25 21:08:26 +01006570 rd = alloc_rootdomain();
Gregory Haskins57d885f2008-01-25 21:08:18 +01006571 if (!rd) {
6572 printk(KERN_WARNING "Cannot alloc root domain\n");
6573 return -ENOMEM;
6574 }
6575
Linus Torvalds1da177e2005-04-16 15:20:36 -07006576 /*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006577 * Set up domains for cpus specified by the cpu_map.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006578 */
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006579 for_each_cpu_mask(i, *cpu_map) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006580 struct sched_domain *sd = NULL, *p;
6581 cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
6582
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006583 cpus_and(nodemask, nodemask, *cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006584
6585#ifdef CONFIG_NUMA
Ingo Molnardd41f592007-07-09 18:51:59 +02006586 if (cpus_weight(*cpu_map) >
6587 SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
John Hawkes9c1cfda2005-09-06 15:18:14 -07006588 sd = &per_cpu(allnodes_domains, i);
6589 *sd = SD_ALLNODES_INIT;
6590 sd->span = *cpu_map;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006591 cpu_to_allnodes_group(i, cpu_map, &sd->groups);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006592 p = sd;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006593 sd_allnodes = 1;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006594 } else
6595 p = NULL;
6596
Linus Torvalds1da177e2005-04-16 15:20:36 -07006597 sd = &per_cpu(node_domains, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006598 *sd = SD_NODE_INIT;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006599 sd->span = sched_domain_node_span(cpu_to_node(i));
6600 sd->parent = p;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006601 if (p)
6602 p->child = sd;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006603 cpus_and(sd->span, sd->span, *cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006604#endif
6605
6606 p = sd;
6607 sd = &per_cpu(phys_domains, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006608 *sd = SD_CPU_INIT;
6609 sd->span = nodemask;
6610 sd->parent = p;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006611 if (p)
6612 p->child = sd;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006613 cpu_to_phys_group(i, cpu_map, &sd->groups);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006614
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006615#ifdef CONFIG_SCHED_MC
6616 p = sd;
6617 sd = &per_cpu(core_domains, i);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006618 *sd = SD_MC_INIT;
6619 sd->span = cpu_coregroup_map(i);
6620 cpus_and(sd->span, sd->span, *cpu_map);
6621 sd->parent = p;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006622 p->child = sd;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006623 cpu_to_core_group(i, cpu_map, &sd->groups);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006624#endif
6625
Linus Torvalds1da177e2005-04-16 15:20:36 -07006626#ifdef CONFIG_SCHED_SMT
6627 p = sd;
6628 sd = &per_cpu(cpu_domains, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006629 *sd = SD_SIBLING_INIT;
Mike Travisd5a74302007-10-16 01:24:05 -07006630 sd->span = per_cpu(cpu_sibling_map, i);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006631 cpus_and(sd->span, sd->span, *cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006632 sd->parent = p;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006633 p->child = sd;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006634 cpu_to_cpu_group(i, cpu_map, &sd->groups);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006635#endif
6636 }
6637
6638#ifdef CONFIG_SCHED_SMT
6639 /* Set up CPU (sibling) groups */
John Hawkes9c1cfda2005-09-06 15:18:14 -07006640 for_each_cpu_mask(i, *cpu_map) {
Mike Travisd5a74302007-10-16 01:24:05 -07006641 cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006642 cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006643 if (i != first_cpu(this_sibling_map))
6644 continue;
6645
Ingo Molnardd41f592007-07-09 18:51:59 +02006646 init_sched_build_groups(this_sibling_map, cpu_map,
6647 &cpu_to_cpu_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006648 }
6649#endif
6650
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006651#ifdef CONFIG_SCHED_MC
6652 /* Set up multi-core groups */
6653 for_each_cpu_mask(i, *cpu_map) {
6654 cpumask_t this_core_map = cpu_coregroup_map(i);
6655 cpus_and(this_core_map, this_core_map, *cpu_map);
6656 if (i != first_cpu(this_core_map))
6657 continue;
Ingo Molnardd41f592007-07-09 18:51:59 +02006658 init_sched_build_groups(this_core_map, cpu_map,
6659 &cpu_to_core_group);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006660 }
6661#endif
6662
Linus Torvalds1da177e2005-04-16 15:20:36 -07006663 /* Set up physical groups */
6664 for (i = 0; i < MAX_NUMNODES; i++) {
6665 cpumask_t nodemask = node_to_cpumask(i);
6666
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006667 cpus_and(nodemask, nodemask, *cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006668 if (cpus_empty(nodemask))
6669 continue;
6670
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006671 init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006672 }
6673
6674#ifdef CONFIG_NUMA
6675 /* Set up node groups */
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006676 if (sd_allnodes)
Ingo Molnardd41f592007-07-09 18:51:59 +02006677 init_sched_build_groups(*cpu_map, cpu_map,
6678 &cpu_to_allnodes_group);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006679
6680 for (i = 0; i < MAX_NUMNODES; i++) {
6681 /* Set up node groups */
6682 struct sched_group *sg, *prev;
6683 cpumask_t nodemask = node_to_cpumask(i);
6684 cpumask_t domainspan;
6685 cpumask_t covered = CPU_MASK_NONE;
6686 int j;
6687
6688 cpus_and(nodemask, nodemask, *cpu_map);
John Hawkesd1b55132005-09-06 15:18:14 -07006689 if (cpus_empty(nodemask)) {
6690 sched_group_nodes[i] = NULL;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006691 continue;
John Hawkesd1b55132005-09-06 15:18:14 -07006692 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006693
6694 domainspan = sched_domain_node_span(i);
6695 cpus_and(domainspan, domainspan, *cpu_map);
6696
Srivatsa Vaddagiri15f0b672006-06-27 02:54:40 -07006697 sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006698 if (!sg) {
6699 printk(KERN_WARNING "Can not alloc domain group for "
6700 "node %d\n", i);
6701 goto error;
6702 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006703 sched_group_nodes[i] = sg;
6704 for_each_cpu_mask(j, nodemask) {
6705 struct sched_domain *sd;
Ingo Molnar9761eea2007-07-09 18:52:00 +02006706
John Hawkes9c1cfda2005-09-06 15:18:14 -07006707 sd = &per_cpu(node_domains, j);
6708 sd->groups = sg;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006709 }
Eric Dumazet5517d862007-05-08 00:32:57 -07006710 sg->__cpu_power = 0;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006711 sg->cpumask = nodemask;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006712 sg->next = sg;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006713 cpus_or(covered, covered, nodemask);
6714 prev = sg;
6715
6716 for (j = 0; j < MAX_NUMNODES; j++) {
6717 cpumask_t tmp, notcovered;
6718 int n = (i + j) % MAX_NUMNODES;
6719
6720 cpus_complement(notcovered, covered);
6721 cpus_and(tmp, notcovered, *cpu_map);
6722 cpus_and(tmp, tmp, domainspan);
6723 if (cpus_empty(tmp))
6724 break;
6725
6726 nodemask = node_to_cpumask(n);
6727 cpus_and(tmp, tmp, nodemask);
6728 if (cpus_empty(tmp))
6729 continue;
6730
Srivatsa Vaddagiri15f0b672006-06-27 02:54:40 -07006731 sg = kmalloc_node(sizeof(struct sched_group),
6732 GFP_KERNEL, i);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006733 if (!sg) {
6734 printk(KERN_WARNING
6735 "Can not alloc domain group for node %d\n", j);
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006736 goto error;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006737 }
Eric Dumazet5517d862007-05-08 00:32:57 -07006738 sg->__cpu_power = 0;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006739 sg->cpumask = tmp;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006740 sg->next = prev->next;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006741 cpus_or(covered, covered, tmp);
6742 prev->next = sg;
6743 prev = sg;
6744 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006745 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006746#endif
6747
6748 /* Calculate CPU power for physical packages and nodes */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006749#ifdef CONFIG_SCHED_SMT
6750 for_each_cpu_mask(i, *cpu_map) {
Ingo Molnardd41f592007-07-09 18:51:59 +02006751 struct sched_domain *sd = &per_cpu(cpu_domains, i);
6752
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006753 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006754 }
6755#endif
6756#ifdef CONFIG_SCHED_MC
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006757 for_each_cpu_mask(i, *cpu_map) {
Ingo Molnardd41f592007-07-09 18:51:59 +02006758 struct sched_domain *sd = &per_cpu(core_domains, i);
6759
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006760 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006761 }
6762#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006763
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006764 for_each_cpu_mask(i, *cpu_map) {
Ingo Molnardd41f592007-07-09 18:51:59 +02006765 struct sched_domain *sd = &per_cpu(phys_domains, i);
6766
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006767 init_sched_groups_power(i, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006768 }
6769
John Hawkes9c1cfda2005-09-06 15:18:14 -07006770#ifdef CONFIG_NUMA
Siddha, Suresh B08069032006-03-27 01:15:23 -08006771 for (i = 0; i < MAX_NUMNODES; i++)
6772 init_numa_sched_groups_power(sched_group_nodes[i]);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006773
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006774 if (sd_allnodes) {
6775 struct sched_group *sg;
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07006776
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006777 cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07006778 init_numa_sched_groups_power(sg);
6779 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006780#endif
6781
Linus Torvalds1da177e2005-04-16 15:20:36 -07006782 /* Attach the domains */
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006783 for_each_cpu_mask(i, *cpu_map) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006784 struct sched_domain *sd;
6785#ifdef CONFIG_SCHED_SMT
6786 sd = &per_cpu(cpu_domains, i);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006787#elif defined(CONFIG_SCHED_MC)
6788 sd = &per_cpu(core_domains, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006789#else
6790 sd = &per_cpu(phys_domains, i);
6791#endif
Gregory Haskins57d885f2008-01-25 21:08:18 +01006792 cpu_attach_domain(sd, rd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006793 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006794
6795 return 0;
6796
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006797#ifdef CONFIG_NUMA
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006798error:
6799 free_sched_groups(cpu_map);
6800 return -ENOMEM;
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006801#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006802}
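/*
 * Rough sketch of the hierarchy built above (added for illustration; which
 * levels actually exist depends on the config options and the machine):
 *
 *	cpu_domains      - SMT siblings               (CONFIG_SCHED_SMT)
 *	core_domains     - cores within a package     (CONFIG_SCHED_MC)
 *	phys_domains     - cpus within a node
 *	node_domains     - a span of nearby nodes     (CONFIG_NUMA)
 *	allnodes_domains - every cpu in cpu_map       (CONFIG_NUMA, large boxes)
 *
 * Each domain's ->parent points at the next (wider) level and ->child at the
 * previous one; the "Attach the domains" loop hands the lowest existing level
 * for each cpu to cpu_attach_domain().
 */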
Paul Jackson029190c2007-10-18 23:40:20 -07006803
6804static cpumask_t *doms_cur; /* current sched domains */
6805static int ndoms_cur; /* number of sched domains in 'doms_cur' */
6806
6807/*
6808 * Special case: If a kmalloc of a doms_cur partition (array of
6809 * cpumask_t) fails, then fallback to a single sched domain,
6810 * as determined by the single cpumask_t fallback_doms.
6811 */
6812static cpumask_t fallback_doms;
6813
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006814/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006815 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
Paul Jackson029190c2007-10-18 23:40:20 -07006816 * For now this just excludes isolated cpus, but could be used to
6817 * exclude other special cases in the future.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006818 */
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006819static int arch_init_sched_domains(const cpumask_t *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006820{
Milton Miller73785472007-10-24 18:23:48 +02006821 int err;
6822
Paul Jackson029190c2007-10-18 23:40:20 -07006823 ndoms_cur = 1;
6824 doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
6825 if (!doms_cur)
6826 doms_cur = &fallback_doms;
6827 cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
Milton Miller73785472007-10-24 18:23:48 +02006828 err = build_sched_domains(doms_cur);
Milton Miller6382bc92007-10-15 17:00:19 +02006829 register_sched_domain_sysctl();
Milton Miller73785472007-10-24 18:23:48 +02006830
6831 return err;
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006832}
6833
6834static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006835{
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006836 free_sched_groups(cpu_map);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006837}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006838
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006839/*
6840 * Detach sched domains from a group of cpus specified in cpu_map
6841 * These cpus will now be attached to the NULL domain
6842 */
Arjan van de Ven858119e2006-01-14 13:20:43 -08006843static void detach_destroy_domains(const cpumask_t *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006844{
6845 int i;
6846
Milton Miller6382bc92007-10-15 17:00:19 +02006847 unregister_sched_domain_sysctl();
6848
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006849 for_each_cpu_mask(i, *cpu_map)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006850 cpu_attach_domain(NULL, &def_root_domain, i);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006851 synchronize_sched();
6852 arch_destroy_sched_domains(cpu_map);
6853}
6854
Paul Jackson029190c2007-10-18 23:40:20 -07006855/*
6856 * Partition sched domains as specified by the 'ndoms_new'
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006857 * cpumasks in the array doms_new[] of cpumasks. This compares
Paul Jackson029190c2007-10-18 23:40:20 -07006858 * doms_new[] to the current sched domain partitioning, doms_cur[].
6859 * It destroys each deleted domain and builds each new domain.
6860 *
6861 * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006862 * The masks don't intersect (don't overlap). We should set up one
6863 * sched domain for each mask. CPUs not in any of the cpumasks will
6864 * not be load balanced. If the same cpumask appears both in the
Paul Jackson029190c2007-10-18 23:40:20 -07006865 * current 'doms_cur' domains and in the new 'doms_new', we can leave
6866 * it as it is.
6867 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006868 * The passed in 'doms_new' should be kmalloc'd. This routine takes
6869 * ownership of it and will kfree it when done with it. If the caller
Paul Jackson029190c2007-10-18 23:40:20 -07006870 * failed the kmalloc call, then it can pass in doms_new == NULL,
6871 * and partition_sched_domains() will fall back to the single partition
6872 * 'fallback_doms'.
6873 *
6874 * Call with hotplug lock held
6875 */
6876void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
6877{
6878 int i, j;
6879
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01006880 lock_doms_cur();
6881
Milton Miller73785472007-10-24 18:23:48 +02006882 /* always unregister in case we don't destroy any domains */
6883 unregister_sched_domain_sysctl();
6884
Paul Jackson029190c2007-10-18 23:40:20 -07006885 if (doms_new == NULL) {
6886 ndoms_new = 1;
6887 doms_new = &fallback_doms;
6888 cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
6889 }
6890
6891 /* Destroy deleted domains */
6892 for (i = 0; i < ndoms_cur; i++) {
6893 for (j = 0; j < ndoms_new; j++) {
6894 if (cpus_equal(doms_cur[i], doms_new[j]))
6895 goto match1;
6896 }
6897 /* no match - a current sched domain not in new doms_new[] */
6898 detach_destroy_domains(doms_cur + i);
6899match1:
6900 ;
6901 }
6902
6903 /* Build new domains */
6904 for (i = 0; i < ndoms_new; i++) {
6905 for (j = 0; j < ndoms_cur; j++) {
6906 if (cpus_equal(doms_new[i], doms_cur[j]))
6907 goto match2;
6908 }
6909 /* no match - add a new doms_new */
6910 build_sched_domains(doms_new + i);
6911match2:
6912 ;
6913 }
6914
6915 /* Remember the new sched domains */
6916 if (doms_cur != &fallback_doms)
6917 kfree(doms_cur);
6918 doms_cur = doms_new;
6919 ndoms_cur = ndoms_new;
Milton Miller73785472007-10-24 18:23:48 +02006920
6921 register_sched_domain_sysctl();
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01006922
6923 unlock_doms_cur();
Paul Jackson029190c2007-10-18 23:40:20 -07006924}
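/*
 * Hypothetical usage sketch (added for illustration; example_repartition()
 * is not a real kernel function).  A caller such as the cpuset code builds a
 * kmalloc'd array of non-overlapping cpumasks and hands ownership of it to
 * partition_sched_domains():
 */
#if 0
static void example_repartition(void)
{
	cpumask_t *doms;

	doms = kmalloc(2 * sizeof(cpumask_t), GFP_KERNEL);
	if (!doms) {
		/* NULL makes partition_sched_domains() use fallback_doms */
		partition_sched_domains(1, NULL);
		return;
	}

	cpus_clear(doms[0]);
	cpus_clear(doms[1]);
	cpu_set(0, doms[0]);			/* first partition: cpu 0 */
	cpu_set(1, doms[1]);			/* second partition: cpu 1 */

	get_online_cpus();			/* "Call with hotplug lock held" */
	partition_sched_domains(2, doms);	/* takes ownership of doms */
	put_online_cpus();
}
#endif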
6925
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006926#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
Adrian Bunk6707de002007-08-12 18:08:19 +02006927static int arch_reinit_sched_domains(void)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006928{
6929 int err;
6930
Gautham R Shenoy95402b32008-01-25 21:08:02 +01006931 get_online_cpus();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006932 detach_destroy_domains(&cpu_online_map);
6933 err = arch_init_sched_domains(&cpu_online_map);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01006934 put_online_cpus();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006935
6936 return err;
6937}
6938
6939static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
6940{
6941 int ret;
6942
6943 if (buf[0] != '0' && buf[0] != '1')
6944 return -EINVAL;
6945
6946 if (smt)
6947 sched_smt_power_savings = (buf[0] == '1');
6948 else
6949 sched_mc_power_savings = (buf[0] == '1');
6950
6951 ret = arch_reinit_sched_domains();
6952
6953 return ret ? ret : count;
6954}
6955
Adrian Bunk6707de002007-08-12 18:08:19 +02006956#ifdef CONFIG_SCHED_MC
6957static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
6958{
6959 return sprintf(page, "%u\n", sched_mc_power_savings);
6960}
6961static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
6962 const char *buf, size_t count)
6963{
6964 return sched_power_savings_store(buf, count, 0);
6965}
6966static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
6967 sched_mc_power_savings_store);
6968#endif
6969
6970#ifdef CONFIG_SCHED_SMT
6971static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
6972{
6973 return sprintf(page, "%u\n", sched_smt_power_savings);
6974}
6975static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
6976 const char *buf, size_t count)
6977{
6978 return sched_power_savings_store(buf, count, 1);
6979}
6980static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
6981 sched_smt_power_savings_store);
6982#endif
6983
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006984int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
6985{
6986 int err = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006987
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006988#ifdef CONFIG_SCHED_SMT
6989 if (smt_capable())
6990 err = sysfs_create_file(&cls->kset.kobj,
6991 &attr_sched_smt_power_savings.attr);
6992#endif
6993#ifdef CONFIG_SCHED_MC
6994 if (!err && mc_capable())
6995 err = sysfs_create_file(&cls->kset.kobj,
6996 &attr_sched_mc_power_savings.attr);
6997#endif
6998 return err;
6999}
7000#endif
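/*
 * Usage note (added for illustration): these attributes are registered
 * against the cpu sysdev class, so on a typical system the policies can be
 * toggled from user space with e.g.
 *
 *	echo 1 > /sys/devices/system/cpu/sched_mc_power_savings
 *	echo 0 > /sys/devices/system/cpu/sched_smt_power_savings
 *
 * Anything other than '0' or '1' is rejected with -EINVAL, and every write
 * goes through arch_reinit_sched_domains() to rebuild the domain hierarchy.
 */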
7001
Linus Torvalds1da177e2005-04-16 15:20:36 -07007002/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007003 * Force a reinitialization of the sched domains hierarchy. The domains
Linus Torvalds1da177e2005-04-16 15:20:36 -07007004 * and groups cannot be updated in place without racing with the balancing
Nick Piggin41c7ce92005-06-25 14:57:24 -07007005 * code, so we temporarily attach all running cpus to the NULL domain
Linus Torvalds1da177e2005-04-16 15:20:36 -07007006 * which will prevent rebalancing while the sched domains are recalculated.
7007 */
7008static int update_sched_domains(struct notifier_block *nfb,
7009 unsigned long action, void *hcpu)
7010{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007011 switch (action) {
7012 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007013 case CPU_UP_PREPARE_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007014 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007015 case CPU_DOWN_PREPARE_FROZEN:
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007016 detach_destroy_domains(&cpu_online_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007017 return NOTIFY_OK;
7018
7019 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007020 case CPU_UP_CANCELED_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007021 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007022 case CPU_DOWN_FAILED_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007023 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007024 case CPU_ONLINE_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007025 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007026 case CPU_DEAD_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007027 /*
7028 * Fall through and re-initialise the domains.
7029 */
7030 break;
7031 default:
7032 return NOTIFY_DONE;
7033 }
7034
7035 /* The hotplug lock is already held by cpu_up/cpu_down */
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007036 arch_init_sched_domains(&cpu_online_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007037
7038 return NOTIFY_OK;
7039}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007040
7041void __init sched_init_smp(void)
7042{
Nick Piggin5c1e1762006-10-03 01:14:04 -07007043 cpumask_t non_isolated_cpus;
7044
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007045 get_online_cpus();
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007046 arch_init_sched_domains(&cpu_online_map);
Nathan Lynche5e56732007-01-10 23:15:28 -08007047 cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
Nick Piggin5c1e1762006-10-03 01:14:04 -07007048 if (cpus_empty(non_isolated_cpus))
7049 cpu_set(smp_processor_id(), non_isolated_cpus);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007050 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007051 /* XXX: Theoretical race here - CPU may be hotplugged now */
7052 hotcpu_notifier(update_sched_domains, 0);
Nick Piggin5c1e1762006-10-03 01:14:04 -07007053
7054 /* Move init over to a non-isolated CPU */
7055 if (set_cpus_allowed(current, non_isolated_cpus) < 0)
7056 BUG();
Ingo Molnar19978ca2007-11-09 22:39:38 +01007057 sched_init_granularity();
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007058
7059#ifdef CONFIG_FAIR_GROUP_SCHED
7060 if (nr_cpu_ids == 1)
7061 return;
7062
7063 lb_monitor_task = kthread_create(load_balance_monitor, NULL,
7064 "group_balance");
7065 if (!IS_ERR(lb_monitor_task)) {
7066 lb_monitor_task->flags |= PF_NOFREEZE;
7067 wake_up_process(lb_monitor_task);
7068 } else {
7069 printk(KERN_ERR "Could not create load balance monitor thread"
7070 "(error = %ld) \n", PTR_ERR(lb_monitor_task));
7071 }
7072#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07007073}
7074#else
7075void __init sched_init_smp(void)
7076{
Ingo Molnar19978ca2007-11-09 22:39:38 +01007077 sched_init_granularity();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007078}
7079#endif /* CONFIG_SMP */
7080
7081int in_sched_functions(unsigned long addr)
7082{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007083 return in_lock_functions(addr) ||
7084 (addr >= (unsigned long)__sched_text_start
7085 && addr < (unsigned long)__sched_text_end);
7086}
7087
Alexey Dobriyana9957442007-10-15 17:00:13 +02007088static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02007089{
7090 cfs_rq->tasks_timeline = RB_ROOT;
Ingo Molnardd41f592007-07-09 18:51:59 +02007091#ifdef CONFIG_FAIR_GROUP_SCHED
7092 cfs_rq->rq = rq;
7093#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02007094 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
Ingo Molnardd41f592007-07-09 18:51:59 +02007095}
7096
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007097static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
7098{
7099 struct rt_prio_array *array;
7100 int i;
7101
7102 array = &rt_rq->active;
7103 for (i = 0; i < MAX_RT_PRIO; i++) {
7104 INIT_LIST_HEAD(array->queue + i);
7105 __clear_bit(i, array->bitmap);
7106 }
7107 /* delimiter for bitsearch: */
7108 __set_bit(MAX_RT_PRIO, array->bitmap);
7109
Peter Zijlstra48d5e252008-01-25 21:08:31 +01007110#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
7111 rt_rq->highest_prio = MAX_RT_PRIO;
7112#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007113#ifdef CONFIG_SMP
7114 rt_rq->rt_nr_migratory = 0;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007115 rt_rq->overloaded = 0;
7116#endif
7117
7118 rt_rq->rt_time = 0;
7119 rt_rq->rt_throttled = 0;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007120
7121#ifdef CONFIG_FAIR_GROUP_SCHED
7122 rt_rq->rq = rq;
7123#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007124}
7125
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007126#ifdef CONFIG_FAIR_GROUP_SCHED
7127static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg,
7128 struct cfs_rq *cfs_rq, struct sched_entity *se,
7129 int cpu, int add)
7130{
7131 tg->cfs_rq[cpu] = cfs_rq;
7132 init_cfs_rq(cfs_rq, rq);
7133 cfs_rq->tg = tg;
7134 if (add)
7135 list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
7136
7137 tg->se[cpu] = se;
7138 se->cfs_rq = &rq->cfs;
7139 se->my_q = cfs_rq;
7140 se->load.weight = tg->shares;
7141 se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
7142 se->parent = NULL;
7143}
7144
7145static void init_tg_rt_entry(struct rq *rq, struct task_group *tg,
7146 struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
7147 int cpu, int add)
7148{
7149 tg->rt_rq[cpu] = rt_rq;
7150 init_rt_rq(rt_rq, rq);
7151 rt_rq->tg = tg;
7152 rt_rq->rt_se = rt_se;
7153 if (add)
7154 list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
7155
7156 tg->rt_se[cpu] = rt_se;
7157 rt_se->rt_rq = &rq->rt;
7158 rt_se->my_q = rt_rq;
7159 rt_se->parent = NULL;
7160 INIT_LIST_HEAD(&rt_se->run_list);
7161}
7162#endif
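/*
 * Illustration of the per-cpu wiring performed above for one task group tg
 * on cpu i (added for clarity, CONFIG_FAIR_GROUP_SCHED):
 *
 *	tg->cfs_rq[i] --> cfs_rq      the group's own runqueue on cpu i
 *	tg->se[i]     --> se          the entity that represents the group
 *	se->my_q      --> cfs_rq      the entity "owns" the group runqueue
 *	se->cfs_rq    --> &rq->cfs    the entity itself is queued on the root
 *
 * and symmetrically rt_rq / rt_se for the real-time class.
 */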
7163
Linus Torvalds1da177e2005-04-16 15:20:36 -07007164void __init sched_init(void)
7165{
Christoph Lameter476f3532007-05-06 14:48:58 -07007166 int highest_cpu = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02007167 int i, j;
7168
Gregory Haskins57d885f2008-01-25 21:08:18 +01007169#ifdef CONFIG_SMP
7170 init_defrootdomain();
7171#endif
7172
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007173#ifdef CONFIG_FAIR_GROUP_SCHED
7174 list_add(&init_task_group.list, &task_groups);
7175#endif
7176
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08007177 for_each_possible_cpu(i) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07007178 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007179
7180 rq = cpu_rq(i);
7181 spin_lock_init(&rq->lock);
Ingo Molnarfcb99372006-07-03 00:25:10 -07007182 lockdep_set_class(&rq->lock, &rq->rq_lock_key);
Nick Piggin78979862005-06-25 14:57:13 -07007183 rq->nr_running = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02007184 rq->clock = 1;
7185 init_cfs_rq(&rq->cfs, rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007186 init_rt_rq(&rq->rt, rq);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007187#ifdef CONFIG_FAIR_GROUP_SCHED
7188 init_task_group.shares = init_task_group_load;
7189 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
7190 init_tg_cfs_entry(rq, &init_task_group,
7191 &per_cpu(init_cfs_rq, i),
7192 &per_cpu(init_sched_entity, i), i, 1);
7193
7194 init_task_group.rt_ratio = sysctl_sched_rt_ratio; /* XXX */
7195 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
7196 init_tg_rt_entry(rq, &init_task_group,
7197 &per_cpu(init_rt_rq, i),
7198 &per_cpu(init_sched_rt_entity, i), i, 1);
7199#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007200 rq->rt_period_expire = 0;
Peter Zijlstra48d5e252008-01-25 21:08:31 +01007201 rq->rt_throttled = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007202
Ingo Molnardd41f592007-07-09 18:51:59 +02007203 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7204 rq->cpu_load[j] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007205#ifdef CONFIG_SMP
Nick Piggin41c7ce92005-06-25 14:57:24 -07007206 rq->sd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01007207 rq->rd = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007208 rq->active_balance = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02007209 rq->next_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007210 rq->push_cpu = 0;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07007211 rq->cpu = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007212 rq->migration_thread = NULL;
7213 INIT_LIST_HEAD(&rq->migration_queue);
Gregory Haskinsdc938522008-01-25 21:08:26 +01007214 rq_attach_root(rq, &def_root_domain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007215#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01007216 init_rq_hrtick(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007217 atomic_set(&rq->nr_iowait, 0);
Christoph Lameter476f3532007-05-06 14:48:58 -07007218 highest_cpu = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007219 }
7220
Peter Williams2dd73a42006-06-27 02:54:34 -07007221 set_load_weight(&init_task);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07007222
Avi Kivitye107be32007-07-26 13:40:43 +02007223#ifdef CONFIG_PREEMPT_NOTIFIERS
7224 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7225#endif
7226
Christoph Lameterc9819f42006-12-10 02:20:25 -08007227#ifdef CONFIG_SMP
Christoph Lameter476f3532007-05-06 14:48:58 -07007228 nr_cpu_ids = highest_cpu + 1;
Christoph Lameterc9819f42006-12-10 02:20:25 -08007229 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
7230#endif
7231
Heiko Carstensb50f60c2006-07-30 03:03:52 -07007232#ifdef CONFIG_RT_MUTEXES
7233 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
7234#endif
7235
Linus Torvalds1da177e2005-04-16 15:20:36 -07007236 /*
7237 * The boot idle thread does lazy MMU switching as well:
7238 */
7239 atomic_inc(&init_mm.mm_count);
7240 enter_lazy_tlb(&init_mm, current);
7241
7242 /*
7243 * Make us the idle thread. Technically, schedule() should not be
7244 * called from this thread, however somewhere below it might be,
7245 * but because we are the idle thread, we just pick up running again
7246 * when this runqueue becomes "idle".
7247 */
7248 init_idle(current, smp_processor_id());
Ingo Molnardd41f592007-07-09 18:51:59 +02007249 /*
7250 * During early bootup we pretend to be a normal task:
7251 */
7252 current->sched_class = &fair_sched_class;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007253}
7254
7255#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
7256void __might_sleep(char *file, int line)
7257{
Ingo Molnar48f24c42006-07-03 00:25:40 -07007258#ifdef in_atomic
Linus Torvalds1da177e2005-04-16 15:20:36 -07007259 static unsigned long prev_jiffy; /* ratelimiting */
7260
7261 if ((in_atomic() || irqs_disabled()) &&
7262 system_state == SYSTEM_RUNNING && !oops_in_progress) {
7263 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7264 return;
7265 prev_jiffy = jiffies;
Ingo Molnar91368d72006-03-23 03:00:54 -08007266 printk(KERN_ERR "BUG: sleeping function called from invalid"
Linus Torvalds1da177e2005-04-16 15:20:36 -07007267 " context at %s:%d\n", file, line);
7268 printk("in_atomic():%d, irqs_disabled():%d\n",
7269 in_atomic(), irqs_disabled());
Peter Zijlstraa4c410f2006-12-06 20:37:21 -08007270 debug_show_held_locks(current);
Ingo Molnar3117df02006-12-13 00:34:43 -08007271 if (irqs_disabled())
7272 print_irqtrace_events(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007273 dump_stack();
7274 }
7275#endif
7276}
7277EXPORT_SYMBOL(__might_sleep);
7278#endif
7279
7280#ifdef CONFIG_MAGIC_SYSRQ
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02007281static void normalize_task(struct rq *rq, struct task_struct *p)
7282{
7283 int on_rq;
7284 update_rq_clock(rq);
7285 on_rq = p->se.on_rq;
7286 if (on_rq)
7287 deactivate_task(rq, p, 0);
7288 __setscheduler(rq, p, SCHED_NORMAL, 0);
7289 if (on_rq) {
7290 activate_task(rq, p, 0);
7291 resched_task(rq->curr);
7292 }
7293}
7294
Linus Torvalds1da177e2005-04-16 15:20:36 -07007295void normalize_rt_tasks(void)
7296{
Ingo Molnara0f98a12007-06-17 18:37:45 +02007297 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007298 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07007299 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007300
7301 read_lock_irq(&tasklist_lock);
Ingo Molnara0f98a12007-06-17 18:37:45 +02007302 do_each_thread(g, p) {
Ingo Molnar178be792007-10-15 17:00:18 +02007303 /*
7304 * Only normalize user tasks:
7305 */
7306 if (!p->mm)
7307 continue;
7308
Ingo Molnardd41f592007-07-09 18:51:59 +02007309 p->se.exec_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02007310#ifdef CONFIG_SCHEDSTATS
7311 p->se.wait_start = 0;
7312 p->se.sleep_start = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02007313 p->se.block_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02007314#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02007315 task_rq(p)->clock = 0;
7316
7317 if (!rt_task(p)) {
7318 /*
7319 * Renice negative nice level userspace
7320 * tasks back to 0:
7321 */
7322 if (TASK_NICE(p) < 0 && p->mm)
7323 set_user_nice(p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007324 continue;
Ingo Molnardd41f592007-07-09 18:51:59 +02007325 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007326
Ingo Molnarb29739f2006-06-27 02:54:51 -07007327 spin_lock_irqsave(&p->pi_lock, flags);
7328 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007329
Ingo Molnar178be792007-10-15 17:00:18 +02007330 normalize_task(rq, p);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02007331
Ingo Molnarb29739f2006-06-27 02:54:51 -07007332 __task_rq_unlock(rq);
7333 spin_unlock_irqrestore(&p->pi_lock, flags);
Ingo Molnara0f98a12007-06-17 18:37:45 +02007334 } while_each_thread(g, p);
7335
Linus Torvalds1da177e2005-04-16 15:20:36 -07007336 read_unlock_irq(&tasklist_lock);
7337}
7338
7339#endif /* CONFIG_MAGIC_SYSRQ */
Linus Torvalds1df5c102005-09-12 07:59:21 -07007340
7341#ifdef CONFIG_IA64
7342/*
7343 * These functions are only useful for the IA64 MCA handling.
7344 *
7345 * They can only be called when the whole system has been
7346 * stopped - every CPU needs to be quiescent, and no scheduling
7347 * activity can take place. Using them for anything else would
7348 * be a serious bug, and as a result, they aren't even visible
7349 * under any other configuration.
7350 */
7351
7352/**
7353 * curr_task - return the current task for a given cpu.
7354 * @cpu: the processor in question.
7355 *
7356 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7357 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07007358struct task_struct *curr_task(int cpu)
Linus Torvalds1df5c102005-09-12 07:59:21 -07007359{
7360 return cpu_curr(cpu);
7361}
7362
7363/**
7364 * set_curr_task - set the current task for a given cpu.
7365 * @cpu: the processor in question.
7366 * @p: the task pointer to set.
7367 *
7368 * Description: This function must only be used when non-maskable interrupts
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007369 * are serviced on a separate stack. It allows the architecture to switch the
7370 * notion of the current task on a cpu in a non-blocking manner. This function
Linus Torvalds1df5c102005-09-12 07:59:21 -07007371 * must be called with all CPUs synchronized and interrupts disabled, and the
7372 * caller must save the original value of the current task (see
7373 * curr_task() above) and restore that value before reenabling interrupts and
7374 * re-starting the system.
7375 *
7376 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7377 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07007378void set_curr_task(int cpu, struct task_struct *p)
Linus Torvalds1df5c102005-09-12 07:59:21 -07007379{
7380 cpu_curr(cpu) = p;
7381}
7382
7383#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007384
7385#ifdef CONFIG_FAIR_GROUP_SCHED
7386
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007387#ifdef CONFIG_SMP
7388/*
7389 * distribute shares of all task groups among their schedulable entities,
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007390 * to reflect load distribution across cpus.
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007391 */
7392static int rebalance_shares(struct sched_domain *sd, int this_cpu)
7393{
7394 struct cfs_rq *cfs_rq;
7395 struct rq *rq = cpu_rq(this_cpu);
7396 cpumask_t sdspan = sd->span;
7397 int balanced = 1;
7398
7399	/* Walk through all the task groups that we have */
7400 for_each_leaf_cfs_rq(rq, cfs_rq) {
7401 int i;
7402 unsigned long total_load = 0, total_shares;
7403 struct task_group *tg = cfs_rq->tg;
7404
7405 /* Gather total task load of this group across cpus */
7406 for_each_cpu_mask(i, sdspan)
7407 total_load += tg->cfs_rq[i]->load.weight;
7408
Ingo Molnar0eab9142008-01-25 21:08:19 +01007409 /* Nothing to do if this group has no load */
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007410 if (!total_load)
7411 continue;
7412
7413 /*
7414 * tg->shares represents the number of cpu shares the task group
7415 * is eligible to hold on a single cpu. On N cpus, it is
7416		 * eligible to hold (N * tg->shares) cpu shares.
7417 */
7418 total_shares = tg->shares * cpus_weight(sdspan);
7419
7420 /*
7421 * redistribute total_shares across cpus as per the task load
7422 * distribution.
7423 */
7424 for_each_cpu_mask(i, sdspan) {
7425 unsigned long local_load, local_shares;
7426
7427 local_load = tg->cfs_rq[i]->load.weight;
7428 local_shares = (local_load * total_shares) / total_load;
7429 if (!local_shares)
7430 local_shares = MIN_GROUP_SHARES;
7431 if (local_shares == tg->se[i]->load.weight)
7432 continue;
7433
7434 spin_lock_irq(&cpu_rq(i)->lock);
7435 set_se_shares(tg->se[i], local_shares);
7436 spin_unlock_irq(&cpu_rq(i)->lock);
7437 balanced = 0;
7438 }
7439 }
7440
7441 return balanced;
7442}
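/*
 * Worked example (added for illustration): a group with tg->shares = 1024
 * spans a 2-cpu domain, so total_shares = 2048.  If its runnable load is
 * split 1536 vs. 512 (i.e. 75%/25%) across the two cpus, the loop above
 * resolves to
 *
 *	local_shares[0] = (1536 * 2048) / 2048 = 1536
 *	local_shares[1] = ( 512 * 2048) / 2048 =  512
 *
 * and set_se_shares() re-weights the two group entities accordingly.  A cpu
 * whose computed share would drop to 0 is clamped to MIN_GROUP_SHARES.
 */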
7443
7444/*
7445 * How frequently should we rebalance_shares() across cpus?
7446 *
7447 * The more frequently we rebalance shares, the more accurate the fairness
7448 * of cpu bandwidth distribution between task groups becomes. However, a higher
7449 * frequency also implies increased scheduling overhead.
7450 *
7451 * sysctl_sched_min_bal_int_shares represents the minimum interval between
7452 * consecutive calls to rebalance_shares() in the same sched domain.
7453 *
7454 * sysctl_sched_max_bal_int_shares represents the maximum interval between
7455 * consecutive calls to rebalance_shares() in the same sched domain.
7456 *
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007457 * These settings allow for the appropriate trade-off between accuracy of
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007458 * fairness and the associated overhead.
7459 *
7460 */
7461
7462/* default: 8ms, units: milliseconds */
7463const_debug unsigned int sysctl_sched_min_bal_int_shares = 8;
7464
7465/* default: 128ms, units: milliseconds */
7466const_debug unsigned int sysctl_sched_max_bal_int_shares = 128;
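/*
 * Illustration (added): load_balance_monitor() below starts at the minimum
 * interval and doubles it after every pass that found nothing to move, so
 * with the defaults the polling period backs off as
 *
 *	8ms -> 16ms -> 32ms -> 64ms -> 128ms (cap)
 *
 * and snaps back to 8ms as soon as a pass actually rebalanced shares.
 */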
7467
7468/* kernel thread that runs rebalance_shares() periodically */
7469static int load_balance_monitor(void *unused)
7470{
7471 unsigned int timeout = sysctl_sched_min_bal_int_shares;
7472 struct sched_param schedparm;
7473 int ret;
7474
7475 /*
7476 * We don't want this thread's execution to be limited by the shares
7477	 * assigned to the default group (init_task_group). Hence make it run
7478 * as a SCHED_RR RT task at the lowest priority.
7479 */
7480 schedparm.sched_priority = 1;
7481 ret = sched_setscheduler(current, SCHED_RR, &schedparm);
7482 if (ret)
7483 printk(KERN_ERR "Couldn't set SCHED_RR policy for load balance"
7484 " monitor thread (error = %d) \n", ret);
7485
7486 while (!kthread_should_stop()) {
7487 int i, cpu, balanced = 1;
7488
7489 /* Prevent cpus going down or coming up */
Gautham R Shenoy86ef5c92008-01-25 21:08:02 +01007490 get_online_cpus();
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007491 /* lockout changes to doms_cur[] array */
7492 lock_doms_cur();
7493 /*
7494 * Enter a rcu read-side critical section to safely walk rq->sd
7495 * chain on various cpus and to walk task group list
7496 * (rq->leaf_cfs_rq_list) in rebalance_shares().
7497 */
7498 rcu_read_lock();
7499
7500 for (i = 0; i < ndoms_cur; i++) {
7501 cpumask_t cpumap = doms_cur[i];
7502 struct sched_domain *sd = NULL, *sd_prev = NULL;
7503
7504 cpu = first_cpu(cpumap);
7505
7506 /* Find the highest domain at which to balance shares */
7507 for_each_domain(cpu, sd) {
7508 if (!(sd->flags & SD_LOAD_BALANCE))
7509 continue;
7510 sd_prev = sd;
7511 }
7512
7513 sd = sd_prev;
7514 /* sd == NULL? No load balance reqd in this domain */
7515 if (!sd)
7516 continue;
7517
7518 balanced &= rebalance_shares(sd, cpu);
7519 }
7520
7521 rcu_read_unlock();
7522
7523 unlock_doms_cur();
Gautham R Shenoy86ef5c92008-01-25 21:08:02 +01007524 put_online_cpus();
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007525
7526 if (!balanced)
7527 timeout = sysctl_sched_min_bal_int_shares;
7528 else if (timeout < sysctl_sched_max_bal_int_shares)
7529 timeout *= 2;
7530
7531 msleep_interruptible(timeout);
7532 }
7533
7534 return 0;
7535}
7536#endif /* CONFIG_SMP */
7537
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007538static void free_sched_group(struct task_group *tg)
7539{
7540 int i;
7541
7542 for_each_possible_cpu(i) {
7543 if (tg->cfs_rq)
7544 kfree(tg->cfs_rq[i]);
7545 if (tg->se)
7546 kfree(tg->se[i]);
7547 if (tg->rt_rq)
7548 kfree(tg->rt_rq[i]);
7549 if (tg->rt_se)
7550 kfree(tg->rt_se[i]);
7551 }
7552
7553 kfree(tg->cfs_rq);
7554 kfree(tg->se);
7555 kfree(tg->rt_rq);
7556 kfree(tg->rt_se);
7557 kfree(tg);
7558}
7559
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007560/* allocate runqueue etc for a new task group */
Ingo Molnar4cf86d72007-10-15 17:00:14 +02007561struct task_group *sched_create_group(void)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007562{
Ingo Molnar4cf86d72007-10-15 17:00:14 +02007563 struct task_group *tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007564 struct cfs_rq *cfs_rq;
7565 struct sched_entity *se;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007566 struct rt_rq *rt_rq;
7567 struct sched_rt_entity *rt_se;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007568 struct rq *rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007569 int i;
7570
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007571 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7572 if (!tg)
7573 return ERR_PTR(-ENOMEM);
7574
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007575 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007576 if (!tg->cfs_rq)
7577 goto err;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007578 tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007579 if (!tg->se)
7580 goto err;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007581 tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL);
7582 if (!tg->rt_rq)
7583 goto err;
7584 tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL);
7585 if (!tg->rt_se)
7586 goto err;
7587
7588 tg->shares = NICE_0_LOAD;
7589 tg->rt_ratio = 0; /* XXX */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007590
7591 for_each_possible_cpu(i) {
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007592 rq = cpu_rq(i);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007593
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007594 cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
7595 GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007596 if (!cfs_rq)
7597 goto err;
7598
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007599 se = kmalloc_node(sizeof(struct sched_entity),
7600 GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007601 if (!se)
7602 goto err;
7603
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007604 rt_rq = kmalloc_node(sizeof(struct rt_rq),
7605 GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
7606 if (!rt_rq)
7607 goto err;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007608
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007609 rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
7610 GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
7611 if (!rt_se)
7612 goto err;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007613
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007614 init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0);
7615 init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007616 }
7617
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +01007618 lock_task_group_list();
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007619 for_each_possible_cpu(i) {
7620 rq = cpu_rq(i);
7621 cfs_rq = tg->cfs_rq[i];
7622 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007623 rt_rq = tg->rt_rq[i];
7624 list_add_rcu(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007625 }
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007626 list_add_rcu(&tg->list, &task_groups);
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +01007627 unlock_task_group_list();
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007628
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007629 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007630
7631err:
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007632 free_sched_group(tg);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007633 return ERR_PTR(-ENOMEM);
7634}
7635
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007636/* rcu callback to free various structures associated with a task group */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007637static void free_sched_group_rcu(struct rcu_head *rhp)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007638{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007639 /* now it should be safe to free those cfs_rqs */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007640 free_sched_group(container_of(rhp, struct task_group, rcu));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007641}
7642
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007643/* Destroy runqueue etc associated with a task group */
Ingo Molnar4cf86d72007-10-15 17:00:14 +02007644void sched_destroy_group(struct task_group *tg)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007645{
James Bottomley7bae49d2007-10-29 21:18:11 +01007646 struct cfs_rq *cfs_rq = NULL;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007647 struct rt_rq *rt_rq = NULL;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007648 int i;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007649
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +01007650 lock_task_group_list();
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007651 for_each_possible_cpu(i) {
7652 cfs_rq = tg->cfs_rq[i];
7653 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007654 rt_rq = tg->rt_rq[i];
7655 list_del_rcu(&rt_rq->leaf_rt_rq_list);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007656 }
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007657 list_del_rcu(&tg->list);
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +01007658 unlock_task_group_list();
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007659
James Bottomley7bae49d2007-10-29 21:18:11 +01007660 BUG_ON(!cfs_rq);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007661
7662	/* wait for possible concurrent references to cfs_rqs to complete */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007663 call_rcu(&tg->rcu, free_sched_group_rcu);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007664}
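/*
 * Hypothetical lifecycle sketch (added for illustration; the function name
 * and error handling are made up, and 2 * NICE_0_LOAD is just an arbitrary
 * example weight).  The user/cgroup grouping code built on top of this API
 * essentially does:
 */
#if 0
static int example_group_lifecycle(void)
{
	struct task_group *tg;
	int err;

	tg = sched_create_group();
	if (IS_ERR(tg))
		return PTR_ERR(tg);

	/* give the group twice the cpu weight of the default group */
	err = sched_group_set_shares(tg, 2 * NICE_0_LOAD);

	/*
	 * ... tasks would be attached to tg by the enclosing grouping code
	 * (which then calls sched_move_task()) and detached again later ...
	 */

	/* tear down once the group is empty; frees its per-cpu structures */
	sched_destroy_group(tg);
	return err;
}
#endif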
7665
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007666/* change task's runqueue when it moves between groups.
Ingo Molnar3a252012007-10-15 17:00:12 +02007667 * The caller of this function should have put the task in its new group
7668 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7669 * reflect its new group.
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007670 */
7671void sched_move_task(struct task_struct *tsk)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007672{
7673 int on_rq, running;
7674 unsigned long flags;
7675 struct rq *rq;
7676
7677 rq = task_rq_lock(tsk, &flags);
7678
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007679 update_rq_clock(rq);
7680
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01007681 running = task_current(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007682 on_rq = tsk->se.on_rq;
7683
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007684 if (on_rq) {
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007685 dequeue_task(rq, tsk, 0);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007686 if (unlikely(running))
7687 tsk->sched_class->put_prev_task(rq, tsk);
7688 }
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007689
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007690 set_task_rq(tsk, task_cpu(tsk));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007691
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007692 if (on_rq) {
7693 if (unlikely(running))
7694 tsk->sched_class->set_curr_task(rq);
Dmitry Adamushko7074bad2007-10-15 17:00:07 +02007695 enqueue_task(rq, tsk, 0);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007696 }
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007697
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007698 task_rq_unlock(rq, &flags);
7699}
7700
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007701/* rq->lock to be locked by caller */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007702static void set_se_shares(struct sched_entity *se, unsigned long shares)
7703{
7704 struct cfs_rq *cfs_rq = se->cfs_rq;
7705 struct rq *rq = cfs_rq->rq;
7706 int on_rq;
7707
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007708 if (!shares)
7709 shares = MIN_GROUP_SHARES;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007710
7711 on_rq = se->on_rq;
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007712 if (on_rq) {
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007713 dequeue_entity(cfs_rq, se, 0);
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007714 dec_cpu_load(rq, se->load.weight);
7715 }
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007716
7717 se->load.weight = shares;
7718 se->load.inv_weight = div64_64((1ULL<<32), shares);
7719
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007720 if (on_rq) {
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007721 enqueue_entity(cfs_rq, se, 0);
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007722 inc_cpu_load(rq, se->load.weight);
7723 }
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007724}
7725
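/*
 * Change the cpu "shares" (weight) assigned to a task group and propagate
 * the new weight to the group's schedulable entity on every cpu.
 */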
Ingo Molnar4cf86d72007-10-15 17:00:14 +02007726int sched_group_set_shares(struct task_group *tg, unsigned long shares)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007727{
7728 int i;
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007729 struct cfs_rq *cfs_rq;
7730 struct rq *rq;
Ingo Molnarc61935f2008-01-22 11:24:58 +01007731
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +01007732 lock_task_group_list();
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007733 if (tg->shares == shares)
Dhaval Giani5cb350b2007-10-15 17:00:14 +02007734 goto done;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007735
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007736 if (shares < MIN_GROUP_SHARES)
7737 shares = MIN_GROUP_SHARES;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007738
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01007739 /*
7740 * Prevent any load balance activity (rebalance_shares,
7741 * load_balance_fair) from referring to this group first,
7742 * by taking it off the rq->leaf_cfs_rq_list on each cpu.
7743 */
7744 for_each_possible_cpu(i) {
7745 cfs_rq = tg->cfs_rq[i];
7746 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
7747 }
7748
7749 /* wait for any ongoing reference to this group to finish */
7750 synchronize_sched();
7751
7752 /*
7753 * Now we are free to modify the group's share on each cpu
7754	 * without tripping rebalance_shares or load_balance_fair.
7755 */
7756 tg->shares = shares;
7757 for_each_possible_cpu(i) {
7758 spin_lock_irq(&cpu_rq(i)->lock);
7759 set_se_shares(tg->se[i], shares);
7760 spin_unlock_irq(&cpu_rq(i)->lock);
7761 }
7762
7763 /*
7764 * Enable load balance activity on this group, by inserting it back on
7765 * each cpu's rq->leaf_cfs_rq_list.
7766 */
7767 for_each_possible_cpu(i) {
7768 rq = cpu_rq(i);
7769 cfs_rq = tg->cfs_rq[i];
7770 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
7771 }
Dhaval Giani5cb350b2007-10-15 17:00:14 +02007772done:
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +01007773 unlock_task_group_list();
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007774 return 0;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007775}
7776
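/* return the cpu shares currently assigned to a task group */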
Dhaval Giani5cb350b2007-10-15 17:00:14 +02007777unsigned long sched_group_shares(struct task_group *tg)
7778{
7779 return tg->shares;
7780}
7781
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007782/*
7783 * Ensure the total rt_ratio <= sysctl_sched_rt_ratio
7784 */
7785int sched_group_set_rt_ratio(struct task_group *tg, unsigned long rt_ratio)
7786{
7787 struct task_group *tgi;
7788 unsigned long total = 0;
7789
7790 rcu_read_lock();
7791 list_for_each_entry_rcu(tgi, &task_groups, list)
7792 total += tgi->rt_ratio;
7793 rcu_read_unlock();
7794
7795 if (total + rt_ratio - tg->rt_ratio > sysctl_sched_rt_ratio)
7796 return -EINVAL;
7797
7798 tg->rt_ratio = rt_ratio;
7799 return 0;
7800}
7801
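/* return the rt_ratio currently assigned to a task group */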
7802unsigned long sched_group_rt_ratio(struct task_group *tg)
7803{
7804 return tg->rt_ratio;
7805}
7806
Ingo Molnar3a252012007-10-15 17:00:12 +02007807#endif /* CONFIG_FAIR_GROUP_SCHED */
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007808
7809#ifdef CONFIG_FAIR_CGROUP_SCHED
7810
7811/* return the task_group object corresponding to a cgroup */
Paul Menage2b01dfe2007-10-24 18:23:50 +02007812static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007813{
Paul Menage2b01dfe2007-10-24 18:23:50 +02007814 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
7815 struct task_group, css);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007816}
7817
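/*
 * Allocate a task_group for a newly created cgroup; the top-level cgroup
 * is bound to the pre-existing init_task_group instead.
 */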
7818static struct cgroup_subsys_state *
Paul Menage2b01dfe2007-10-24 18:23:50 +02007819cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007820{
7821 struct task_group *tg;
7822
Paul Menage2b01dfe2007-10-24 18:23:50 +02007823 if (!cgrp->parent) {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007824 /* This is early initialization for the top cgroup */
Paul Menage2b01dfe2007-10-24 18:23:50 +02007825 init_task_group.css.cgroup = cgrp;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007826 return &init_task_group.css;
7827 }
7828
7829	/* we support only 1-level deep hierarchical scheduler at the moment */
Paul Menage2b01dfe2007-10-24 18:23:50 +02007830 if (cgrp->parent->parent)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007831 return ERR_PTR(-EINVAL);
7832
7833 tg = sched_create_group();
7834 if (IS_ERR(tg))
7835 return ERR_PTR(-ENOMEM);
7836
7837	/* Bind the cgroup to the task_group object we just created */
Paul Menage2b01dfe2007-10-24 18:23:50 +02007838 tg->css.cgroup = cgrp;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007839
7840 return &tg->css;
7841}
7842
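/* destroy the task_group bound to a cgroup that is going away */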
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007843static void
7844cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007845{
Paul Menage2b01dfe2007-10-24 18:23:50 +02007846 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007847
7848 sched_destroy_group(tg);
7849}
7850
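/* check whether a task may be moved into this cgroup */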
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007851static int
7852cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
7853 struct task_struct *tsk)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007854{
7855 /* We don't support RT-tasks being in separate groups */
7856 if (tsk->sched_class != &fair_sched_class)
7857 return -EINVAL;
7858
7859 return 0;
7860}
7861
7862static void
Paul Menage2b01dfe2007-10-24 18:23:50 +02007863cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007864 struct cgroup *old_cont, struct task_struct *tsk)
7865{
7866 sched_move_task(tsk);
7867}
7868
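/* read/write handlers for the cpu.shares control file */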
Paul Menage2b01dfe2007-10-24 18:23:50 +02007869static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype,
7870 u64 shareval)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007871{
Paul Menage2b01dfe2007-10-24 18:23:50 +02007872 return sched_group_set_shares(cgroup_tg(cgrp), shareval);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007873}
7874
Paul Menage2b01dfe2007-10-24 18:23:50 +02007875static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007876{
Paul Menage2b01dfe2007-10-24 18:23:50 +02007877 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007878
7879 return (u64) tg->shares;
7880}
7881
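/* read/write handlers for the cpu.rt_ratio control file */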
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007882static int cpu_rt_ratio_write_uint(struct cgroup *cgrp, struct cftype *cftype,
7883 u64 rt_ratio_val)
7884{
7885 return sched_group_set_rt_ratio(cgroup_tg(cgrp), rt_ratio_val);
7886}
7887
7888static u64 cpu_rt_ratio_read_uint(struct cgroup *cgrp, struct cftype *cft)
7889{
7890 struct task_group *tg = cgroup_tg(cgrp);
7891
7892 return (u64) tg->rt_ratio;
7893}
7894
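/* control files exported by the cpu cgroup subsystem */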
Paul Menagefe5c7cc2007-10-29 21:18:11 +01007895static struct cftype cpu_files[] = {
7896 {
7897 .name = "shares",
7898 .read_uint = cpu_shares_read_uint,
7899 .write_uint = cpu_shares_write_uint,
7900 },
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007901 {
7902 .name = "rt_ratio",
7903 .read_uint = cpu_rt_ratio_read_uint,
7904 .write_uint = cpu_rt_ratio_write_uint,
7905 },
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007906};
7907
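/* register the cpu subsystem's control files with the cgroup filesystem */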
7908static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
7909{
Paul Menagefe5c7cc2007-10-29 21:18:11 +01007910 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007911}
7912
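/* cgroup subsystem definition for the "cpu" controller */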
7913struct cgroup_subsys cpu_cgroup_subsys = {
Ingo Molnar38605ca2007-10-29 21:18:11 +01007914 .name = "cpu",
7915 .create = cpu_cgroup_create,
7916 .destroy = cpu_cgroup_destroy,
7917 .can_attach = cpu_cgroup_can_attach,
7918 .attach = cpu_cgroup_attach,
7919 .populate = cpu_cgroup_populate,
7920 .subsys_id = cpu_cgroup_subsys_id,
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007921 .early_init = 1,
7922};
7923
7924#endif /* CONFIG_FAIR_CGROUP_SCHED */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007925
7926#ifdef CONFIG_CGROUP_CPUACCT
7927
7928/*
7929 * CPU accounting code for task groups.
7930 *
7931 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
7932 * (balbir@in.ibm.com).
7933 */
7934
7935/* track cpu usage of a group of tasks */
7936struct cpuacct {
7937 struct cgroup_subsys_state css;
7938 /* cpuusage holds pointer to a u64-type object on every cpu */
7939	/* cpuusage holds a pointer to a u64-type object on every cpu */
7940};
7941
7942struct cgroup_subsys cpuacct_subsys;
7943
7944/* return cpu accounting group corresponding to this container */
7945static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
7946{
7947 return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
7948 struct cpuacct, css);
7949}
7950
7951/* return cpu accounting group to which this task belongs */
7952static inline struct cpuacct *task_ca(struct task_struct *tsk)
7953{
7954 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
7955 struct cpuacct, css);
7956}
7957
7958/* create a new cpu accounting group */
7959static struct cgroup_subsys_state *cpuacct_create(
7960 struct cgroup_subsys *ss, struct cgroup *cont)
7961{
7962 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
7963
7964 if (!ca)
7965 return ERR_PTR(-ENOMEM);
7966
7967 ca->cpuusage = alloc_percpu(u64);
7968 if (!ca->cpuusage) {
7969 kfree(ca);
7970 return ERR_PTR(-ENOMEM);
7971 }
7972
7973 return &ca->css;
7974}
7975
7976/* destroy an existing cpu accounting group */
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007977static void
7978cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007979{
7980 struct cpuacct *ca = cgroup_ca(cont);
7981
7982 free_percpu(ca->cpuusage);
7983 kfree(ca);
7984}
7985
7986/* return total cpu usage (in nanoseconds) of a group */
7987static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
7988{
7989 struct cpuacct *ca = cgroup_ca(cont);
7990 u64 totalcpuusage = 0;
7991 int i;
7992
7993 for_each_possible_cpu(i) {
7994 u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
7995
7996 /*
7997 * Take rq->lock to make 64-bit addition safe on 32-bit
7998 * platforms.
7999 */
8000 spin_lock_irq(&cpu_rq(i)->lock);
8001 totalcpuusage += *cpuusage;
8002 spin_unlock_irq(&cpu_rq(i)->lock);
8003 }
8004
8005 return totalcpuusage;
8006}
8007
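/* control files exported by the cpuacct subsystem */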
8008static struct cftype files[] = {
8009 {
8010 .name = "usage",
8011 .read_uint = cpuusage_read,
8012 },
8013};
8014
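/* register the cpuacct control files with the cgroup filesystem */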
8015static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
8016{
8017 return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
8018}
8019
8020/*
8021 * charge this task's execution time to its accounting group.
8022 *
8023 * called with rq->lock held.
8024 */
8025static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
8026{
8027 struct cpuacct *ca;
8028
8029 if (!cpuacct_subsys.active)
8030 return;
8031
8032 ca = task_ca(tsk);
8033 if (ca) {
8034 u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
8035
8036 *cpuusage += cputime;
8037 }
8038}
8039
8040struct cgroup_subsys cpuacct_subsys = {
8041 .name = "cpuacct",
8042 .create = cpuacct_create,
8043 .destroy = cpuacct_destroy,
8044 .populate = cpuacct_populate,
8045 .subsys_id = cpuacct_subsys_id,
8046};
8047#endif /* CONFIG_CGROUP_CPUACCT */