/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>

#include "sched_cpupri.h"
#include "workqueue_sched.h"
#include "sched_autogroup.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))

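/*
 * Worked example (a sketch, assuming the usual values from sched.h,
 * MAX_RT_PRIO == 100 and MAX_PRIO == MAX_RT_PRIO + 40 == 140):
 *
 *      NICE_TO_PRIO(-20) == 100   (highest non-RT priority)
 *      NICE_TO_PRIO(0)   == 120   (the default)
 *      NICE_TO_PRIO(19)  == 139   (== MAX_PRIO-1, lowest priority)
 *
 * and USER_PRIO() maps 100..139 back onto 0..39, so
 * MAX_USER_PRIO == 40.
 */
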
/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

#define NICE_0_LOAD             SCHED_LOAD_SCALE
#define NICE_0_SHIFT            SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE           (100 * HZ / 1000)

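/*
 * A quick sanity check of the arithmetic above (a sketch; the values
 * depend on the configured HZ): with HZ == 1000 a jiffy is 1 ms, so
 * DEF_TIMESLICE == 100 jiffies == 100 ms and NS_TO_JIFFIES(5000000)
 * == 5; with HZ == 250 a jiffy is 4 ms, so DEF_TIMESLICE == 25
 * jiffies, which is still the same 100 ms of wall time.
 */
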
/*
 * A single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF     ((u64)~0ULL)

static inline int rt_policy(int policy)
{
        if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
                return 1;
        return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
        return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
        DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
        struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
        /* nests inside the rq lock: */
        raw_spinlock_t          rt_runtime_lock;
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        ktime_t now;
        int overrun;
        int idle = 0;

        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, rt_b->rt_period);

                if (!overrun)
                        break;

                idle = do_sched_rt_period_timer(rt_b, overrun);
        }

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline int rt_bandwidth_enabled(void)
{
        return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        ktime_t now;

        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        if (hrtimer_active(&rt_b->rt_period_timer))
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                unsigned long delta;
                ktime_t soft, hard;

                if (hrtimer_active(&rt_b->rt_period_timer))
                        break;

                now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
                hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

                soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
                hard = hrtimer_get_expires(&rt_b->rt_period_timer);
                delta = ktime_to_ns(ktime_sub(hard, soft));
                __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                HRTIMER_MODE_ABS_PINNED, 0);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

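/*
 * Illustrative only: how the pieces above fit together. At boot,
 * sched_init() (later in this file) does essentially
 *
 *      init_rt_bandwidth(&def_rt_bandwidth,
 *                      global_rt_period(), global_rt_runtime());
 *
 * i.e. with the default sysctls a 1s period and 0.95s of runtime.
 * start_rt_bandwidth() is then called from the RT scheduling class
 * when RT tasks become runnable, arming rt_period_timer so that
 * do_sched_rt_period_timer() can refresh the runtime budget once
 * per period.
 */
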
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to arch_init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
        struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
        /* schedulable entities of this group on each cpu */
        struct sched_entity **se;
        /* runqueue "owned" by this group on each cpu */
        struct cfs_rq **cfs_rq;
        unsigned long shares;

        atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        struct sched_rt_entity **rt_se;
        struct rt_rq **rt_rq;

        struct rt_bandwidth rt_bandwidth;
#endif

        struct rcu_head rcu;
        struct list_head list;

        struct task_group *parent;
        struct list_head siblings;
        struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
        struct autogroup *autogroup;
#endif
};

/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

# define ROOT_TASK_GROUP_LOAD   NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES      2
#define MAX_SHARES      (1UL << 18)

static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif

/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group root_task_group;

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
        struct load_weight load;
        unsigned long nr_running;

        u64 exec_clock;
        u64 min_vruntime;

        struct rb_root tasks_timeline;
        struct rb_node *rb_leftmost;

        struct list_head tasks;
        struct list_head *balance_iterator;

        /*
         * 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e., when none are currently running).
         */
        struct sched_entity *curr, *next, *last;

        unsigned int nr_spread_over;

#ifdef CONFIG_FAIR_GROUP_SCHED
        struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */

        /*
         * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
         * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
         * (like users, containers etc.)
         *
         * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
         * list is used during load balance.
         */
        int on_list;
        struct list_head leaf_cfs_rq_list;
        struct task_group *tg;  /* group that "owns" this runqueue */

#ifdef CONFIG_SMP
        /*
         * the part of load.weight contributed by tasks
         */
        unsigned long task_weight;

        /*
         * h_load = weight * f(tg)
         *
         * Where f(tg) is the recursive weight fraction assigned to
         * this group.
         */
        unsigned long h_load;

        /*
         * Maintaining per-cpu shares distribution for group scheduling
         *
         * load_stamp is the last time we updated the load average
         * load_last is the last time we updated the load average and saw load
         * load_unacc_exec_time is currently unaccounted execution time
         */
        u64 load_avg;
        u64 load_period;
        u64 load_stamp, load_last, load_unacc_exec_time;

        unsigned long load_contribution;
#endif
#endif
};

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
        struct rt_prio_array active;
        unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        struct {
                int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
                int next; /* next highest */
#endif
        } highest_prio;
#endif
#ifdef CONFIG_SMP
        unsigned long rt_nr_migratory;
        unsigned long rt_nr_total;
        int overloaded;
        struct plist_head pushable_tasks;
#endif
        int rt_throttled;
        u64 rt_time;
        u64 rt_runtime;
        /* Nests inside the rq lock: */
        raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
        unsigned long rt_nr_boosted;

        struct rq *rq;
        struct list_head leaf_rt_rq_list;
        struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
        atomic_t refcount;
        cpumask_var_t span;
        cpumask_var_t online;

        /*
         * The "RT overload" flag: it gets set if a CPU has more than
         * one runnable RT task.
         */
        cpumask_var_t rto_mask;
        atomic_t rto_count;
        struct cpupri cpupri;
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code) must
 * acquire the locks in ascending &runqueue order.
 */
struct rq {
        /* runqueue lock: */
        raw_spinlock_t lock;

        /*
         * nr_running and cpu_load should be in the same cacheline because
         * remote CPUs use both these fields when doing load calculation.
         */
        unsigned long nr_running;
        #define CPU_LOAD_IDX_MAX 5
        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
        unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
        u64 nohz_stamp;
        unsigned char nohz_balance_kick;
#endif
        unsigned int skip_clock_update;

        /* capture load from *all* tasks on this cpu: */
        struct load_weight load;
        unsigned long nr_load_updates;
        u64 nr_switches;

        struct cfs_rq cfs;
        struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
        /* list of leaf cfs_rq on this cpu: */
        struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        struct list_head leaf_rt_rq_list;
#endif

        /*
         * This is part of a global counter where only the total sum
         * over all CPUs matters. A task can increase this counter on
         * one CPU and if it got migrated afterwards it may decrease
         * it on another CPU. Always updated under the runqueue lock:
         */
        unsigned long nr_uninterruptible;

        struct task_struct *curr, *idle, *stop;
        unsigned long next_balance;
        struct mm_struct *prev_mm;

        u64 clock;
        u64 clock_task;

        atomic_t nr_iowait;

#ifdef CONFIG_SMP
        struct root_domain *rd;
        struct sched_domain *sd;

        unsigned long cpu_power;

        unsigned char idle_at_tick;
        /* For active balancing */
        int post_schedule;
        int active_balance;
        int push_cpu;
        struct cpu_stop_work active_balance_work;
        /* cpu of this runqueue: */
        int cpu;
        int online;

        unsigned long avg_load_per_task;

        u64 rt_avg;
        u64 age_stamp;
        u64 idle_stamp;
        u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
        u64 prev_irq_time;
#endif

        /* calc_load related fields */
        unsigned long calc_load_update;
        long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
        int hrtick_csd_pending;
        struct call_single_data hrtick_csd;
#endif
        struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
        /* latency stats */
        struct sched_info rq_sched_info;
        unsigned long long rq_cpu_time;
        /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

        /* sys_sched_yield() stats */
        unsigned int yld_count;

        /* schedule() stats */
        unsigned int sched_switch;
        unsigned int sched_count;
        unsigned int sched_goidle;

        /* try_to_wake_up() stats */
        unsigned int ttwu_count;
        unsigned int ttwu_local;

        /* BKL stats */
        unsigned int bkl_count;
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq->cpu;
#else
        return 0;
#endif
}

#define rcu_dereference_check_sched_domain(p) \
        rcu_dereference_check((p), \
                              rcu_read_lock_sched_held() || \
                              lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
        for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
#define this_rq()               (&__get_cpu_var(runqueues))
#define task_rq(p)              cpu_rq(task_cpu(p))
#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
#define raw_rq()                (&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification
 * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
 * holds that lock for each task it moves into the cgroup. Therefore
 * by holding that lock, we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
        struct task_group *tg;
        struct cgroup_subsys_state *css;

        css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
                        lockdep_is_held(&task_rq(p)->lock));
        tg = container_of(css, struct task_group, css);

        return autogroup_task_group(p, tg);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
        p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
        p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
        p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
        return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static void update_rq_clock_task(struct rq *rq, s64 delta);

static void update_rq_clock(struct rq *rq)
{
        s64 delta;

        if (rq->skip_clock_update)
                return;

        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked
 * @cpu: the processor in question.
 *
 * Returns true if the current cpu runqueue is locked.
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(int cpu)
{
        return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)       \
        __SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled)       \
        (1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
        0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)       \
        #name ,

static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
        NULL
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; sched_feat_names[i]; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int neg = 0;
        int i;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        cmp = strstrip(buf);

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        for (i = 0; sched_feat_names[i]; i++) {
                if (strcmp(cmp, sched_feat_names[i]) == 0) {
                        if (neg)
                                sysctl_sched_features &= ~(1UL << i);
                        else
                                sysctl_sched_features |= (1UL << i);
                        break;
                }
        }

        if (!sched_feat_names[i])
                return -EINVAL;

        *ppos += cnt;

        return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);

        return 0;
}
late_initcall(sched_init_debug);

#endif

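/*
 * Usage sketch (not kernel code): with CONFIG_SCHED_DEBUG set and
 * debugfs mounted at its usual location, the file created above can
 * be poked from a shell, e.g.:
 *
 *      # cat /sys/kernel/debug/sched_features
 *      # echo NO_HRTICK > /sys/kernel/debug/sched_features
 *
 * where the "NO_" prefix parsed by sched_feat_write() clears the
 * named feature bit instead of setting it.
 */
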
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * Period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * Period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

static __read_mostly int scheduler_running;

/*
 * Part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

static inline u64 global_rt_period(void)
{
        return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
        if (sysctl_sched_rt_runtime < 0)
                return RUNTIME_INF;

        return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

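/*
 * Worked example with the defaults above: sysctl_sched_rt_period ==
 * 1000000us, so global_rt_period() == 1000000 * 1000ns == 1s, and
 * sysctl_sched_rt_runtime == 950000us gives global_rt_runtime() ==
 * 0.95s; realtime tasks may therefore consume at most 95% of each
 * second, leaving 5% for everything else. Writing -1 to
 * /proc/sys/kernel/sched_rt_runtime_us makes global_rt_runtime()
 * return RUNTIME_INF, i.e. no throttling.
 */
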
#ifndef prepare_arch_switch
# define prepare_arch_switch(next)      do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)       do { } while (0)
#endif

static inline int task_current(struct rq *rq, struct task_struct *p)
{
        return rq->curr == p;
}

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
        return task_current(rq, p);
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
        rq->lock.owner = current;
#endif
        /*
         * If we are tracking spinlock dependencies then we have to
         * fix up the runqueue lock - which gets 'carried over' from
         * prev into current:
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

        raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
        return p->oncpu;
#else
        return task_current(rq, p);
#endif
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
        /*
         * We can optimise this out completely for !SMP, because the
         * SMP rebalancing from interrupt is the only thing that cares
         * here.
         */
        next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        raw_spin_unlock_irq(&rq->lock);
#else
        raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
        /*
         * After ->oncpu is cleared, the task can be moved to a different CPU.
         * We must ensure this doesn't happen until the switch is completely
         * finished.
         */
        smp_wmb();
        prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * Check whether the task is waking; we use this to synchronize ->cpus_allowed
 * against ttwu().
 */
static inline int task_is_waking(struct task_struct *p)
{
        return unlikely(p->state == TASK_WAKING);
}

/*
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called with interrupts disabled.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
{
        struct rq *rq;

        for (;;) {
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
        }
}

/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(rq->lock)
{
        struct rq *rq;

        for (;;) {
                local_irq_save(*flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock_irqrestore(&rq->lock, *flags);
        }
}

static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
{
        raw_spin_unlock(&rq->lock);
}

static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
        __releases(rq->lock)
{
        raw_spin_unlock_irqrestore(&rq->lock, *flags);
}

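/*
 * Typical usage of the helpers above (a sketch): the retry loop in
 * task_rq_lock() exists because p can migrate between reading
 * task_rq(p) and taking the lock, so the lookup is re-checked under
 * the lock:
 *
 *      unsigned long flags;
 *      struct rq *rq = task_rq_lock(p, &flags);
 *
 *      ... p is pinned to rq here ...
 *
 *      task_rq_unlock(rq, &flags);
 */
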
/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
        __acquires(rq->lock)
{
        struct rq *rq;

        local_irq_disable();
        rq = this_rq();
        raw_spin_lock(&rq->lock);

        return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrtimer while holding
 * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for
 * a reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
        if (!sched_feat(HRTICK))
                return 0;
        if (!cpu_active(cpu_of(rq)))
                return 0;
        return hrtimer_is_hres_active(&rq->hrtick_timer);
}

static void hrtick_clear(struct rq *rq)
{
        if (hrtimer_active(&rq->hrtick_timer))
                hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
        struct rq *rq = container_of(timer, struct rq, hrtick_timer);

        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
        raw_spin_unlock(&rq->lock);

        return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
        struct rq *rq = arg;

        raw_spin_lock(&rq->lock);
        hrtimer_restart(&rq->hrtick_timer);
        rq->hrtick_csd_pending = 0;
        raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
        struct hrtimer *timer = &rq->hrtick_timer;
        ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

        hrtimer_set_expires(timer, time);

        if (rq == this_rq()) {
                hrtimer_restart(timer);
        } else if (!rq->hrtick_csd_pending) {
                __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
                rq->hrtick_csd_pending = 1;
        }
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int cpu = (int)(long)hcpu;

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                hrtick_clear(cpu_rq(cpu));
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
        hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
        __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
                        HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
        rq->hrtick_csd_pending = 0;

        rq->hrtick_csd.flags = 0;
        rq->hrtick_csd.func = __hrtick_start;
        rq->hrtick_csd.info = rq;
#endif

        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rq->hrtick_timer.function = hrtick;
}
#else   /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif  /* CONFIG_SCHED_HRTICK */

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

static void resched_task(struct task_struct *p)
{
        int cpu;

        assert_raw_spin_locked(&task_rq(p)->lock);

        if (test_tsk_need_resched(p))
                return;

        set_tsk_need_resched(p);

        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
                return;

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
}

static void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
        int cpu = smp_processor_id();
        int i;
        struct sched_domain *sd;

        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd))
                        if (!idle_cpu(i))
                                return i;
        }
        return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        if (cpu == smp_processor_id())
                return;

        /*
         * This is safe, as this function is called with the timer
         * wheel base lock of (cpu) held. When the CPU is on the way
         * to idle and has not yet set rq->curr to idle then it will
         * be serialized on the timer wheel base lock and take the new
         * timer into account automatically.
         */
        if (rq->curr != rq->idle)
                return;

        /*
         * We can set TIF_RESCHED on the idle task of the other CPU
         * lockless. The worst case is that the other CPU runs the
         * idle task through an additional NOOP schedule()
         */
        set_tsk_need_resched(rq->idle);

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(rq->idle))
                smp_send_reschedule(cpu);
}

#endif /* CONFIG_NO_HZ */

static u64 sched_avg_period(void)
{
        return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

static void sched_avg_update(struct rq *rq)
{
        s64 period = sched_avg_period();

        while ((s64)(rq->clock - rq->age_stamp) > period) {
                /*
                 * Inline assembly required to prevent the compiler
                 * optimising this loop into a divmod call.
                 * See __iter_div_u64_rem() for another example of this.
                 */
                asm("" : "+rm" (rq->age_stamp));
                rq->age_stamp += period;
                rq->rt_avg /= 2;
        }
}

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
        rq->rt_avg += rt_delta;
        sched_avg_update(rq);
}

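/*
 * Back-of-the-envelope check of the decay above: with the default
 * sysctl_sched_time_avg of 1000ms, sched_avg_period() is 0.5s, so
 * rq->rt_avg is halved every half second. RT time observed more than
 * a few periods ago therefore contributes almost nothing, which is
 * what "period over which we average the RT time consumption" means
 * in practice.
 */
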
#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
        assert_raw_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
}

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}

static void sched_avg_update(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

#if BITS_PER_LONG == 32
# define WMULT_CONST    (~0UL)
#else
# define WMULT_CONST    (1UL << 32)
#endif

#define WMULT_SHIFT     32

Ingo Molnar194081e2007-08-09 11:16:51 +02001301/*
1302 * Shift right and round:
1303 */
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001304#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
Ingo Molnar194081e2007-08-09 11:16:51 +02001305
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001306/*
1307 * delta *= weight / lw
1308 */
Ingo Molnarcb1c4fc2007-08-02 17:41:40 +02001309static unsigned long
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001310calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1311 struct load_weight *lw)
1312{
1313 u64 tmp;
1314
Lai Jiangshan7a232e02008-06-12 16:43:07 +08001315 if (!lw->inv_weight) {
1316 if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
1317 lw->inv_weight = 1;
1318 else
1319 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
1320 / (lw->weight+1);
1321 }
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001322
1323 tmp = (u64)delta_exec * weight;
1324 /*
1325 * Check whether we'd overflow the 64-bit multiplication:
1326 */
Ingo Molnar194081e2007-08-09 11:16:51 +02001327 if (unlikely(tmp > WMULT_CONST))
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001328 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
Ingo Molnar194081e2007-08-09 11:16:51 +02001329 WMULT_SHIFT/2);
1330 else
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001331 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001332
Ingo Molnarecf691d2007-08-02 17:41:40 +02001333 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001334}
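/*
 * Worked example (added for illustration; the values are hypothetical):
 * take delta_exec = 1000000 (1ms in ns), weight = 1024 (the nice-0
 * weight) and lw->weight = 2048. lw->inv_weight is computed as roughly
 * 2^32/2048 = 2097152; tmp = 1000000 * 1024 is below WMULT_CONST, so
 * the result is SRR(tmp * lw->inv_weight, 32) ~= 500000: the task is
 * charged about half the wall-clock delta, matching its half share of
 * the total weight.
 */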
1335
Ingo Molnar10919852007-10-15 17:00:04 +02001336static inline void update_load_add(struct load_weight *lw, unsigned long inc)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001337{
1338 lw->weight += inc;
Ingo Molnare89996a2008-03-14 23:48:28 +01001339 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001340}
1341
Ingo Molnar10919852007-10-15 17:00:04 +02001342static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001343{
1344 lw->weight -= dec;
Ingo Molnare89996a2008-03-14 23:48:28 +01001345 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001346}
1347
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001348static inline void update_load_set(struct load_weight *lw, unsigned long w)
1349{
1350 lw->weight = w;
1351 lw->inv_weight = 0;
1352}
1353
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354/*
Peter Williams2dd73a42006-06-27 02:54:34 -07001355 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1356 * of tasks with abnormal "nice" values across CPUs the contribution that
1357 * each task makes to its run queue's load is weighted according to its
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01001358 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
Peter Williams2dd73a42006-06-27 02:54:34 -07001359 * scaled version of the new time slice allocation that they receive on time
1360 * slice expiry etc.
1361 */
1362
Peter Zijlstracce7ade2009-01-15 14:53:37 +01001363#define WEIGHT_IDLEPRIO 3
1364#define WMULT_IDLEPRIO 1431655765
Ingo Molnardd41f592007-07-09 18:51:59 +02001365
1366/*
1367 * Nice levels are multiplicative, with a gentle 10% change for every
1368 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1369 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1370 * that remained on nice 0.
1371 *
1372 * The "10% effect" is relative and cumulative: from _any_ nice level,
1373 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
Ingo Molnarf9153ee2007-07-16 09:46:30 +02001374 * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25:
 1375 * if a task goes up by ~10% and another task goes down by ~10% then
 1376 * the relative distance between them is ~25%.)
Ingo Molnardd41f592007-07-09 18:51:59 +02001377 */
1378static const int prio_to_weight[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001379 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1380 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1381 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1382 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1383 /* 0 */ 1024, 820, 655, 526, 423,
1384 /* 5 */ 335, 272, 215, 172, 137,
1385 /* 10 */ 110, 87, 70, 56, 45,
1386 /* 15 */ 36, 29, 23, 18, 15,
Ingo Molnardd41f592007-07-09 18:51:59 +02001387};
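/*
 * Worked example (added for illustration): consecutive entries differ by
 * the ~1.25 multiplier, e.g. 1024/820 ~= 1.249. Two CPU-bound tasks at
 * nice 0 and nice 1 therefore get 1024/(1024+820) ~= 55.5% and
 * 820/(1024+820) ~= 44.5% of the CPU: roughly the advertised 10%
 * difference, with a ~25% relative distance between them.
 */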
1388
Ingo Molnar5714d2d2007-07-16 09:46:31 +02001389/*
1390 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1391 *
1392 * In cases where the weight does not change often, we can use the
 1393 * precalculated inverse to speed up arithmetic by turning divisions
1394 * into multiplications:
1395 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001396static const u32 prio_to_wmult[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001397 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1398 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1399 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1400 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1401 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1402 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1403 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1404 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
Ingo Molnardd41f592007-07-09 18:51:59 +02001405};
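/*
 * Sanity check (added for illustration): the nice-0 entry is
 * 2^32/1024 = 4194304, and the nice-1 entry is 2^32/820 ~= 5237765,
 * matching the rounded values in the table above.
 */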
Peter Williams2dd73a42006-06-27 02:54:34 -07001406
Bharata B Raoef12fef2009-03-31 10:02:22 +05301407/* Time spent by the tasks of the cpu accounting group executing in ... */
1408enum cpuacct_stat_index {
1409 CPUACCT_STAT_USER, /* ... user mode */
1410 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
1411
1412 CPUACCT_STAT_NSTATS,
1413};
1414
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001415#ifdef CONFIG_CGROUP_CPUACCT
1416static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
Bharata B Raoef12fef2009-03-31 10:02:22 +05301417static void cpuacct_update_stats(struct task_struct *tsk,
1418 enum cpuacct_stat_index idx, cputime_t val);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001419#else
1420static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
Bharata B Raoef12fef2009-03-31 10:02:22 +05301421static inline void cpuacct_update_stats(struct task_struct *tsk,
1422 enum cpuacct_stat_index idx, cputime_t val) {}
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001423#endif
1424
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001425static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1426{
1427 update_load_add(&rq->load, load);
1428}
1429
1430static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1431{
1432 update_load_sub(&rq->load, load);
1433}
1434
Ingo Molnar7940ca32008-08-19 13:40:47 +02001435#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
Peter Zijlstraeb755802008-08-19 12:33:05 +02001436typedef int (*tg_visitor)(struct task_group *, void *);
1437
1438/*
1439 * Iterate the full tree, calling @down when first entering a node and @up when
1440 * leaving it for the final time.
1441 */
1442static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1443{
1444 struct task_group *parent, *child;
1445 int ret;
1446
1447 rcu_read_lock();
1448 parent = &root_task_group;
1449down:
1450 ret = (*down)(parent, data);
1451 if (ret)
1452 goto out_unlock;
1453 list_for_each_entry_rcu(child, &parent->children, siblings) {
1454 parent = child;
1455 goto down;
1456
1457up:
1458 continue;
1459 }
1460 ret = (*up)(parent, data);
1461 if (ret)
1462 goto out_unlock;
1463
1464 child = parent;
1465 parent = parent->parent;
1466 if (parent)
1467 goto up;
1468out_unlock:
1469 rcu_read_unlock();
1470
1471 return ret;
1472}
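/*
 * Illustrative sketch (added for illustration, not part of the original
 * source): the goto-based walk above is an iterative rendering of this
 * natural recursion, avoiding stack usage proportional to the depth of
 * the group hierarchy. RCU read-side locking, which walk_tg_tree() takes
 * itself, is assumed to be held by the caller here:
 */
#if 0
static int walk_tg_tree_recursive_sketch(struct task_group *tg,
					 tg_visitor down, tg_visitor up,
					 void *data)
{
	struct task_group *child;
	int ret;

	ret = (*down)(tg, data);		/* visit on the way in */
	if (ret)
		return ret;
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		ret = walk_tg_tree_recursive_sketch(child, down, up, data);
		if (ret)
			return ret;
	}
	return (*up)(tg, data);			/* visit on the way out */
}
#endif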
1473
1474static int tg_nop(struct task_group *tg, void *data)
1475{
1476 return 0;
1477}
1478#endif
1479
Gregory Haskinse7693a32008-01-25 21:08:09 +01001480#ifdef CONFIG_SMP
Peter Zijlstraf5f08f32009-09-10 13:35:28 +02001481/* Used instead of source_load when we know the type == 0 */
1482static unsigned long weighted_cpuload(const int cpu)
1483{
1484 return cpu_rq(cpu)->load.weight;
1485}
1486
1487/*
1488 * Return a low guess at the load of a migration-source cpu weighted
1489 * according to the scheduling class and "nice" value.
1490 *
1491 * We want to under-estimate the load of migration sources, to
1492 * balance conservatively.
1493 */
1494static unsigned long source_load(int cpu, int type)
1495{
1496 struct rq *rq = cpu_rq(cpu);
1497 unsigned long total = weighted_cpuload(cpu);
1498
1499 if (type == 0 || !sched_feat(LB_BIAS))
1500 return total;
1501
1502 return min(rq->cpu_load[type-1], total);
1503}
1504
1505/*
1506 * Return a high guess at the load of a migration-target cpu weighted
1507 * according to the scheduling class and "nice" value.
1508 */
1509static unsigned long target_load(int cpu, int type)
1510{
1511 struct rq *rq = cpu_rq(cpu);
1512 unsigned long total = weighted_cpuload(cpu);
1513
1514 if (type == 0 || !sched_feat(LB_BIAS))
1515 return total;
1516
1517 return max(rq->cpu_load[type-1], total);
1518}
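/*
 * Worked example (added for illustration): with rq->cpu_load[type-1] =
 * 2048 and an instantaneous rq->load.weight of 1024, source_load()
 * returns the smaller 1024 (under-estimating the source makes us
 * reluctant to pull from it) while target_load() returns the larger
 * 2048 (over-estimating the target makes us reluctant to push to it).
 * Both biases make the load balancer conservative.
 */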
1519
Peter Zijlstraae154be2009-09-10 14:40:57 +02001520static unsigned long power_of(int cpu)
1521{
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02001522 return cpu_rq(cpu)->cpu_power;
Peter Zijlstraae154be2009-09-10 14:40:57 +02001523}
1524
Gregory Haskinse7693a32008-01-25 21:08:09 +01001525static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001526
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001527static unsigned long cpu_avg_load_per_task(int cpu)
1528{
1529 struct rq *rq = cpu_rq(cpu);
Ingo Molnaraf6d5962008-11-29 20:45:15 +01001530 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001531
Steven Rostedt4cd42622008-11-26 21:04:24 -05001532 if (nr_running)
1533 rq->avg_load_per_task = rq->load.weight / nr_running;
Balbir Singha2d47772008-11-12 16:19:00 +05301534 else
1535 rq->avg_load_per_task = 0;
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001536
1537 return rq->avg_load_per_task;
1538}
1539
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001540#ifdef CONFIG_FAIR_GROUP_SCHED
1541
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001542/*
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001543 * Compute the cpu's hierarchical load factor for each task group.
1544 * This needs to be done in a top-down fashion because the load of a child
 1545 * group is a fraction of its parent's load.
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001546 */
Peter Zijlstraeb755802008-08-19 12:33:05 +02001547static int tg_load_down(struct task_group *tg, void *data)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001548{
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001549 unsigned long load;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001550 long cpu = (long)data;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001551
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001552 if (!tg->parent) {
1553 load = cpu_rq(cpu)->load.weight;
1554 } else {
1555 load = tg->parent->cfs_rq[cpu]->h_load;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001556 load *= tg->se[cpu]->load.weight;
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001557 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1558 }
1559
1560 tg->cfs_rq[cpu]->h_load = load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001561
Peter Zijlstraeb755802008-08-19 12:33:05 +02001562 return 0;
Peter Zijlstra4d8d5952008-06-27 13:41:19 +02001563}
1564
Peter Zijlstraeb755802008-08-19 12:33:05 +02001565static void update_h_load(long cpu)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001566{
Peter Zijlstraeb755802008-08-19 12:33:05 +02001567 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001568}
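/*
 * Worked example (added for illustration; the values are hypothetical):
 * suppose cpu_rq(cpu)->load.weight is 3072, so the root's h_load is 3072,
 * and a top-level group's se->load.weight on that cpu is 1024 out of a
 * root cfs_rq weight of 3072. The group's h_load becomes
 * 3072 * 1024 / (3072 + 1) ~= 1023, i.e. roughly a third of the rq load,
 * matching the fraction of the rq weight the group occupies.
 */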
1569
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001570#endif
1571
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001572#ifdef CONFIG_PREEMPT
1573
Peter Zijlstrab78bb862009-09-15 14:23:18 +02001574static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1575
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001576/*
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001577 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1578 * way at the expense of forcing extra atomic operations in all
1579 * invocations. This assures that the double_lock is acquired using the
1580 * same underlying policy as the spinlock_t on this architecture, which
1581 * reduces latency compared to the unfair variant below. However, it
1582 * also adds more overhead and therefore may reduce throughput.
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001583 */
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001584static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1585 __releases(this_rq->lock)
1586 __acquires(busiest->lock)
1587 __acquires(this_rq->lock)
1588{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001589 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001590 double_rq_lock(this_rq, busiest);
1591
1592 return 1;
1593}
1594
1595#else
1596/*
1597 * Unfair double_lock_balance: Optimizes throughput at the expense of
1598 * latency by eliminating extra atomic operations when the locks are
1599 * already in proper order on entry. This favors lower cpu-ids and will
1600 * grant the double lock to lower cpus over higher ids under contention,
1601 * regardless of entry order into the function.
1602 */
1603static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001604 __releases(this_rq->lock)
1605 __acquires(busiest->lock)
1606 __acquires(this_rq->lock)
1607{
1608 int ret = 0;
1609
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001610 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001611 if (busiest < this_rq) {
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001612 raw_spin_unlock(&this_rq->lock);
1613 raw_spin_lock(&busiest->lock);
1614 raw_spin_lock_nested(&this_rq->lock,
1615 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001616 ret = 1;
1617 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001618 raw_spin_lock_nested(&busiest->lock,
1619 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001620 }
1621 return ret;
1622}
1623
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001624#endif /* CONFIG_PREEMPT */
1625
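/*
 * Worked example (added for illustration): in the unfair variant above,
 * ordering the locks by runqueue address is what prevents deadlock. If
 * two CPUs balance toward each other, each holding its own rq->lock, the
 * one whose own rq has the higher address drops its lock and re-acquires
 * both in ascending address order, while the other simply blocks on the
 * second lock. Every path thus acquires in ascending order, so no
 * deadlock cycle can form.
 */
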
1626/*
1627 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1628 */
1629static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1630{
1631 if (unlikely(!irqs_disabled())) {
 1632 /* printk() doesn't work well under rq->lock */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001633 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001634 BUG_ON(1);
1635 }
1636
1637 return _double_lock_balance(this_rq, busiest);
1638}
1639
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001640static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(busiest->lock)
1642{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001643 raw_spin_unlock(&busiest->lock);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001644 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1645}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001646
1647/*
1648 * double_rq_lock - safely lock two runqueues
1649 *
1650 * Note this does not disable interrupts like task_rq_lock,
1651 * you need to do so manually before calling.
1652 */
1653static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1654 __acquires(rq1->lock)
1655 __acquires(rq2->lock)
1656{
1657 BUG_ON(!irqs_disabled());
1658 if (rq1 == rq2) {
1659 raw_spin_lock(&rq1->lock);
1660 __acquire(rq2->lock); /* Fake it out ;) */
1661 } else {
1662 if (rq1 < rq2) {
1663 raw_spin_lock(&rq1->lock);
1664 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1665 } else {
1666 raw_spin_lock(&rq2->lock);
1667 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1668 }
1669 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001670}
1671
1672/*
1673 * double_rq_unlock - safely unlock two runqueues
1674 *
1675 * Note this does not restore interrupts like task_rq_unlock,
1676 * you need to do so manually after calling.
1677 */
1678static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1679 __releases(rq1->lock)
1680 __releases(rq2->lock)
1681{
1682 raw_spin_unlock(&rq1->lock);
1683 if (rq1 != rq2)
1684 raw_spin_unlock(&rq2->lock);
1685 else
1686 __release(rq2->lock);
1687}
1688
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001689#endif
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001690
Peter Zijlstra74f51872010-04-22 21:50:19 +02001691static void calc_load_account_idle(struct rq *this_rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01001692static void update_sysctl(void);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01001693static int get_update_sysctl_factor(void);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07001694static void update_cpu_load(struct rq *this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02001695
Peter Zijlstracd29fe62009-11-27 17:32:46 +01001696static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1697{
1698 set_task_rq(p, cpu);
1699#ifdef CONFIG_SMP
1700 /*
1701 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
 1702 * successfully executed on another CPU. We must ensure that updates of
1703 * per-task data have been completed by this moment.
1704 */
1705 smp_wmb();
1706 task_thread_info(p)->cpu = cpu;
1707#endif
1708}
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001709
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001710static const struct sched_class rt_sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02001711
Peter Zijlstra34f971f2010-09-22 13:53:15 +02001712#define sched_class_highest (&stop_sched_class)
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001713#define for_each_class(class) \
1714 for (class = sched_class_highest; class; class = class->next)
Ingo Molnardd41f592007-07-09 18:51:59 +02001715
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001716#include "sched_stats.h"
1717
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001718static void inc_nr_running(struct rq *rq)
Ingo Molnar6363ca52008-05-29 11:28:57 +02001719{
1720 rq->nr_running++;
Ingo Molnar6363ca52008-05-29 11:28:57 +02001721}
1722
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001723static void dec_nr_running(struct rq *rq)
Ingo Molnar9c217242007-08-02 17:41:40 +02001724{
1725 rq->nr_running--;
Ingo Molnar9c217242007-08-02 17:41:40 +02001726}
1727
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001728static void set_load_weight(struct task_struct *p)
1729{
Ingo Molnardd41f592007-07-09 18:51:59 +02001730 /*
1731 * SCHED_IDLE tasks get minimal weight:
1732 */
1733 if (p->policy == SCHED_IDLE) {
1734 p->se.load.weight = WEIGHT_IDLEPRIO;
1735 p->se.load.inv_weight = WMULT_IDLEPRIO;
1736 return;
1737 }
1738
1739 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1740 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001741}
1742
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001743static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
Gregory Haskins2087a1a2008-06-27 14:30:00 -06001744{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001745 update_rq_clock(rq);
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001746 sched_info_queued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001747 p->sched_class->enqueue_task(rq, p, flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02001748 p->se.on_rq = 1;
1749}
1750
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001751static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnardd41f592007-07-09 18:51:59 +02001752{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001753 update_rq_clock(rq);
Ankita Garg46ac22b2008-07-01 14:30:06 +05301754 sched_info_dequeued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001755 p->sched_class->dequeue_task(rq, p, flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02001756 p->se.on_rq = 0;
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001757}
1758
1759/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001760 * activate_task - move a task to the runqueue.
1761 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001762static void activate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001763{
1764 if (task_contributes_to_load(p))
1765 rq->nr_uninterruptible--;
1766
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001767 enqueue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001768 inc_nr_running(rq);
1769}
1770
1771/*
1772 * deactivate_task - remove a task from the runqueue.
1773 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001774static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001775{
1776 if (task_contributes_to_load(p))
1777 rq->nr_uninterruptible++;
1778
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001779 dequeue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001780 dec_nr_running(rq);
1781}
1782
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001783#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1784
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001785/*
1786 * There are no locks covering percpu hardirq/softirq time.
 1787 * They are only modified in account_system_vtime, on the corresponding
 1788 * CPU with interrupts disabled, so writes are safe.
 1789 * They are read and saved off onto struct rq in update_rq_clock().
 1790 * This may result in another CPU reading this CPU's irq time, racing
 1791 * with irq/account_system_vtime on this CPU. We would either get the old
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001792 * or the new value, with the side effect of accounting a slice of irq time
 1793 * to the wrong task when an irq is in progress while we read rq->clock.
 1794 * That is a worthy compromise in place of having locks on each irq in account_system_time.
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001795 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001796static DEFINE_PER_CPU(u64, cpu_hardirq_time);
1797static DEFINE_PER_CPU(u64, cpu_softirq_time);
1798
1799static DEFINE_PER_CPU(u64, irq_start_time);
1800static int sched_clock_irqtime;
1801
1802void enable_sched_clock_irqtime(void)
1803{
1804 sched_clock_irqtime = 1;
1805}
1806
1807void disable_sched_clock_irqtime(void)
1808{
1809 sched_clock_irqtime = 0;
1810}
1811
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001812#ifndef CONFIG_64BIT
1813static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
1814
1815static inline void irq_time_write_begin(void)
1816{
1817 __this_cpu_inc(irq_time_seq.sequence);
1818 smp_wmb();
1819}
1820
1821static inline void irq_time_write_end(void)
1822{
1823 smp_wmb();
1824 __this_cpu_inc(irq_time_seq.sequence);
1825}
1826
1827static inline u64 irq_time_read(int cpu)
1828{
1829 u64 irq_time;
1830 unsigned seq;
1831
1832 do {
1833 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1834 irq_time = per_cpu(cpu_softirq_time, cpu) +
1835 per_cpu(cpu_hardirq_time, cpu);
1836 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1837
1838 return irq_time;
1839}
1840#else /* CONFIG_64BIT */
1841static inline void irq_time_write_begin(void)
1842{
1843}
1844
1845static inline void irq_time_write_end(void)
1846{
1847}
1848
1849static inline u64 irq_time_read(int cpu)
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001850{
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001851 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1852}
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001853#endif /* CONFIG_64BIT */
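/*
 * Worked example (added for illustration): on 32-bit, loading a u64 irq
 * time takes two instructions, so a remote reader could pair the low
 * half of an old value with the high half of a new one. The seqcount
 * above makes such a torn read detectable: the reader retries whenever
 * the sequence changed (writer active or completed) during the read,
 * while the 64-bit variant can rely on a single naturally-atomic load.
 */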
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001854
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001855/*
1856 * Called before incrementing preempt_count on {soft,}irq_enter
1857 * and before decrementing preempt_count on {soft,}irq_exit.
1858 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001859void account_system_vtime(struct task_struct *curr)
1860{
1861 unsigned long flags;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001862 s64 delta;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001863 int cpu;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001864
1865 if (!sched_clock_irqtime)
1866 return;
1867
1868 local_irq_save(flags);
1869
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001870 cpu = smp_processor_id();
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001871 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
1872 __this_cpu_add(irq_start_time, delta);
1873
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001874 irq_time_write_begin();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001875 /*
1876 * We do not account for softirq time from ksoftirqd here.
 1877 * We want to continue accounting softirq time to the ksoftirqd thread
 1878 * in that case, so as not to confuse the scheduler with a special task
 1879 * that does not consume any time, but still wants to run.
1880 */
1881 if (hardirq_count())
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001882 __this_cpu_add(cpu_hardirq_time, delta);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001883 else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001884 __this_cpu_add(cpu_softirq_time, delta);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001885
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001886 irq_time_write_end();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001887 local_irq_restore(flags);
1888}
Ingo Molnarb7dadc32010-10-18 20:00:37 +02001889EXPORT_SYMBOL_GPL(account_system_vtime);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001890
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001891static void update_rq_clock_task(struct rq *rq, s64 delta)
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07001892{
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001893 s64 irq_delta;
1894
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001895 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001896
1897 /*
1898 * Since irq_time is only updated on {soft,}irq_exit, we might run into
1899 * this case when a previous update_rq_clock() happened inside a
1900 * {soft,}irq region.
1901 *
1902 * When this happens, we stop ->clock_task and only update the
1903 * prev_irq_time stamp to account for the part that fit, so that a next
1904 * update will consume the rest. This ensures ->clock_task is
1905 * monotonic.
1906 *
 1907 * It does, however, cause some slight misattribution of {soft,}irq
 1908 * time; a more accurate solution would be to update the irq_time using
1909 * the current rq->clock timestamp, except that would require using
1910 * atomic ops.
1911 */
1912 if (irq_delta > delta)
1913 irq_delta = delta;
1914
1915 rq->prev_irq_time += irq_delta;
1916 delta -= irq_delta;
1917 rq->clock_task += delta;
1918
1919 if (irq_delta && sched_feat(NONIRQ_POWER))
1920 sched_rt_avg_update(rq, irq_delta);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07001921}
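/*
 * Worked example (added for illustration): if delta is 3ms but 5ms of
 * irq time accrued since the last update, irq_delta is clamped to 3ms;
 * ->clock_task does not advance at all for this update, prev_irq_time
 * advances by 3ms, and the remaining 2ms of irq time are consumed by
 * the next update. This is the monotonicity guarantee described above.
 */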
1922
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001923#else /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001924
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001925static void update_rq_clock_task(struct rq *rq, s64 delta)
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001926{
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001927 rq->clock_task += delta;
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001928}
1929
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001930#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001931
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001932#include "sched_idletask.c"
1933#include "sched_fair.c"
1934#include "sched_rt.c"
Mike Galbraith5091faa2010-11-30 14:18:03 +01001935#include "sched_autogroup.c"
Peter Zijlstra34f971f2010-09-22 13:53:15 +02001936#include "sched_stoptask.c"
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001937#ifdef CONFIG_SCHED_DEBUG
1938# include "sched_debug.c"
1939#endif
1940
Peter Zijlstra34f971f2010-09-22 13:53:15 +02001941void sched_set_stop_task(int cpu, struct task_struct *stop)
1942{
1943 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1944 struct task_struct *old_stop = cpu_rq(cpu)->stop;
1945
1946 if (stop) {
1947 /*
 1948 * Make it appear like a SCHED_FIFO task; it's something
 1949 * userspace knows about and won't get confused by.
1950 *
1951 * Also, it will make PI more or less work without too
1952 * much confusion -- but then, stop work should not
1953 * rely on PI working anyway.
1954 */
1955 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
1956
1957 stop->sched_class = &stop_sched_class;
1958 }
1959
1960 cpu_rq(cpu)->stop = stop;
1961
1962 if (old_stop) {
1963 /*
1964 * Reset it back to a normal scheduling class so that
1965 * it can die in pieces.
1966 */
1967 old_stop->sched_class = &rt_sched_class;
1968 }
1969}
1970
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001971/*
Ingo Molnardd41f592007-07-09 18:51:59 +02001972 * __normal_prio - return the priority that is based on the static prio
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001973 */
Ingo Molnar14531182007-07-09 18:51:59 +02001974static inline int __normal_prio(struct task_struct *p)
1975{
Ingo Molnardd41f592007-07-09 18:51:59 +02001976 return p->static_prio;
Ingo Molnar14531182007-07-09 18:51:59 +02001977}
1978
1979/*
Ingo Molnarb29739f2006-06-27 02:54:51 -07001980 * Calculate the expected normal priority: i.e. priority
1981 * without taking RT-inheritance into account. Might be
1982 * boosted by interactivity modifiers. Changes upon fork,
1983 * setprio syscalls, and whenever the interactivity
1984 * estimator recalculates.
1985 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001986static inline int normal_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07001987{
1988 int prio;
1989
Ingo Molnare05606d2007-07-09 18:51:59 +02001990 if (task_has_rt_policy(p))
Ingo Molnarb29739f2006-06-27 02:54:51 -07001991 prio = MAX_RT_PRIO-1 - p->rt_priority;
1992 else
1993 prio = __normal_prio(p);
1994 return prio;
1995}
1996
1997/*
1998 * Calculate the current priority, i.e. the priority
1999 * taken into account by the scheduler. This value might
2000 * be boosted by RT tasks, or might be boosted by
2001 * interactivity modifiers. Will be RT if the task got
2002 * RT-boosted. If not then it returns p->normal_prio.
2003 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002004static int effective_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07002005{
2006 p->normal_prio = normal_prio(p);
2007 /*
2008 * If we are RT tasks or we were boosted to RT priority,
2009 * keep the priority unchanged. Otherwise, update priority
2010 * to the normal priority:
2011 */
2012 if (!rt_prio(p->prio))
2013 return p->normal_prio;
2014 return p->prio;
2015}
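/*
 * Worked example (added for illustration): with MAX_RT_PRIO == 100, a
 * SCHED_FIFO task with rt_priority 50 gets normal_prio() = 99 - 50 = 49
 * (lower numbers run first). A SCHED_NORMAL task at nice 0 keeps
 * __normal_prio() == static_prio == 120, and effective_prio() only
 * deviates from normal_prio() while the task is PI-boosted into the RT
 * range.
 */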
2016
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017/**
2018 * task_curr - is this task currently executing on a CPU?
2019 * @p: the task in question.
2020 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002021inline int task_curr(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022{
2023 return cpu_curr(task_cpu(p)) == p;
2024}
2025
Steven Rostedtcb469842008-01-25 21:08:22 +01002026static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2027 const struct sched_class *prev_class,
2028 int oldprio, int running)
2029{
2030 if (prev_class != p->sched_class) {
2031 if (prev_class->switched_from)
2032 prev_class->switched_from(rq, p, running);
2033 p->sched_class->switched_to(rq, p, running);
2034 } else
2035 p->sched_class->prio_changed(rq, p, oldprio, running);
2036}
2037
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002038static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2039{
2040 const struct sched_class *class;
2041
2042 if (p->sched_class == rq->curr->sched_class) {
2043 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2044 } else {
2045 for_each_class(class) {
2046 if (class == rq->curr->sched_class)
2047 break;
2048 if (class == p->sched_class) {
2049 resched_task(rq->curr);
2050 break;
2051 }
2052 }
2053 }
2054
2055 /*
2056 * A queue event has occurred, and we're going to schedule. In
 2057 * this case, we can save a useless back-to-back clock update.
2058 */
Mike Galbraithf26f9af2010-12-08 11:05:42 +01002059 if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002060 rq->skip_clock_update = 1;
2061}
2062
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063#ifdef CONFIG_SMP
Ingo Molnarcc367732007-10-15 17:00:18 +02002064/*
2065 * Is this task likely cache-hot:
2066 */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002067static int
Ingo Molnarcc367732007-10-15 17:00:18 +02002068task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2069{
2070 s64 delta;
2071
Peter Zijlstrae6c8fba2009-12-16 18:04:33 +01002072 if (p->sched_class != &fair_sched_class)
2073 return 0;
2074
Nikhil Raoef8002f2010-10-13 12:09:35 -07002075 if (unlikely(p->policy == SCHED_IDLE))
2076 return 0;
2077
Ingo Molnarf540a602008-03-15 17:10:34 +01002078 /*
2079 * Buddy candidates are cache hot:
2080 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002081 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
Peter Zijlstra47932412008-11-04 21:25:09 +01002082 (&p->se == cfs_rq_of(&p->se)->next ||
2083 &p->se == cfs_rq_of(&p->se)->last))
Ingo Molnarf540a602008-03-15 17:10:34 +01002084 return 1;
2085
Ingo Molnar6bc16652007-10-15 17:00:18 +02002086 if (sysctl_sched_migration_cost == -1)
2087 return 1;
2088 if (sysctl_sched_migration_cost == 0)
2089 return 0;
2090
Ingo Molnarcc367732007-10-15 17:00:18 +02002091 delta = now - p->se.exec_start;
2092
2093 return delta < (s64)sysctl_sched_migration_cost;
2094}
2095
Ingo Molnardd41f592007-07-09 18:51:59 +02002096void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
Ingo Molnarc65cc872007-07-09 18:51:58 +02002097{
Peter Zijlstrae2912002009-12-16 18:04:36 +01002098#ifdef CONFIG_SCHED_DEBUG
2099 /*
 2100 * We should never call set_task_cpu() on a blocked task;
 2101 * ttwu() will sort out the placement.
2102 */
Peter Zijlstra077614e2009-12-17 13:16:31 +01002103 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2104 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
Peter Zijlstrae2912002009-12-16 18:04:36 +01002105#endif
2106
Mathieu Desnoyersde1d7282009-05-05 16:49:59 +08002107 trace_sched_migrate_task(p, new_cpu);
Peter Zijlstracbc34ed2008-12-10 08:08:22 +01002108
Peter Zijlstra0c697742009-12-22 15:43:19 +01002109 if (task_cpu(p) != new_cpu) {
2110 p->se.nr_migrations++;
2111 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2112 }
Ingo Molnardd41f592007-07-09 18:51:59 +02002113
2114 __set_task_cpu(p, new_cpu);
Ingo Molnarc65cc872007-07-09 18:51:58 +02002115}
2116
Tejun Heo969c7922010-05-06 18:49:21 +02002117struct migration_arg {
Ingo Molnar36c8b582006-07-03 00:25:41 -07002118 struct task_struct *task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 int dest_cpu;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002120};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121
Tejun Heo969c7922010-05-06 18:49:21 +02002122static int migration_cpu_stop(void *data);
2123
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124/*
2125 * The task's runqueue lock must be held.
 2126 * Returns true if you have to wait for the migration thread.
2127 */
Nikanth Karthikesanb7a2b392010-11-26 12:37:09 +05302128static bool migrate_task(struct task_struct *p, struct rq *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 /*
2131 * If the task is not on a runqueue (and not running), then
Peter Zijlstrae2912002009-12-16 18:04:36 +01002132 * the next wake-up will properly place the task.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 */
Tejun Heo969c7922010-05-06 18:49:21 +02002134 return p->se.on_rq || task_running(rq, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135}
2136
2137/*
2138 * wait_task_inactive - wait for a thread to unschedule.
2139 *
Roland McGrath85ba2d82008-07-25 19:45:58 -07002140 * If @match_state is nonzero, it's the @p->state value just checked and
2141 * not expected to change. If it changes, i.e. @p might have woken up,
2142 * then return zero. When we succeed in waiting for @p to be off its CPU,
2143 * we return a positive number (its total switch count). If a second call
2144 * a short while later returns the same number, the caller can be sure that
2145 * @p has remained unscheduled the whole time.
2146 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 * The caller must ensure that the task *will* unschedule sometime soon,
2148 * else this function might spin for a *long* time. This function can't
2149 * be called with interrupts off, or it may introduce deadlock with
2150 * smp_call_function() if an IPI is sent by the same process we are
2151 * waiting to become inactive.
2152 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002153unsigned long wait_task_inactive(struct task_struct *p, long match_state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154{
2155 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002156 int running, on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002157 unsigned long ncsw;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002158 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159
Andi Kleen3a5c3592007-10-15 17:00:14 +02002160 for (;;) {
2161 /*
2162 * We do the initial early heuristics without holding
2163 * any task-queue locks at all. We'll only try to get
2164 * the runqueue lock when things look like they will
2165 * work out!
2166 */
2167 rq = task_rq(p);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002168
Andi Kleen3a5c3592007-10-15 17:00:14 +02002169 /*
2170 * If the task is actively running on another CPU
2171 * still, just relax and busy-wait without holding
2172 * any locks.
2173 *
2174 * NOTE! Since we don't hold any locks, it's not
2175 * even sure that "rq" stays as the right runqueue!
2176 * But we don't care, since "task_running()" will
2177 * return false if the runqueue has changed and p
2178 * is actually now running somewhere else!
2179 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002180 while (task_running(rq, p)) {
2181 if (match_state && unlikely(p->state != match_state))
2182 return 0;
Andi Kleen3a5c3592007-10-15 17:00:14 +02002183 cpu_relax();
Roland McGrath85ba2d82008-07-25 19:45:58 -07002184 }
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002185
Andi Kleen3a5c3592007-10-15 17:00:14 +02002186 /*
2187 * Ok, time to look more closely! We need the rq
2188 * lock now, to be *sure*. If we're wrong, we'll
2189 * just go back and repeat.
2190 */
2191 rq = task_rq_lock(p, &flags);
Peter Zijlstra27a9da62010-05-04 20:36:56 +02002192 trace_sched_wait_task(p);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002193 running = task_running(rq, p);
2194 on_rq = p->se.on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002195 ncsw = 0;
Oleg Nesterovf31e11d2008-08-20 16:54:44 -07002196 if (!match_state || p->state == match_state)
Oleg Nesterov93dcf552008-08-20 16:54:44 -07002197 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
Andi Kleen3a5c3592007-10-15 17:00:14 +02002198 task_rq_unlock(rq, &flags);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002199
Andi Kleen3a5c3592007-10-15 17:00:14 +02002200 /*
Roland McGrath85ba2d82008-07-25 19:45:58 -07002201 * If it changed from the expected state, bail out now.
2202 */
2203 if (unlikely(!ncsw))
2204 break;
2205
2206 /*
Andi Kleen3a5c3592007-10-15 17:00:14 +02002207 * Was it really running after all now that we
2208 * checked with the proper locks actually held?
2209 *
2210 * Oops. Go back and try again..
2211 */
2212 if (unlikely(running)) {
2213 cpu_relax();
2214 continue;
2215 }
2216
2217 /*
2218 * It's not enough that it's not actively running,
2219 * it must be off the runqueue _entirely_, and not
2220 * preempted!
2221 *
Luis Henriques80dd99b2009-03-16 19:58:09 +00002222 * So if it was still runnable (but just not actively
Andi Kleen3a5c3592007-10-15 17:00:14 +02002223 * running right now), it's preempted, and we should
2224 * yield - it could be a while.
2225 */
2226 if (unlikely(on_rq)) {
2227 schedule_timeout_uninterruptible(1);
2228 continue;
2229 }
2230
2231 /*
2232 * Ahh, all good. It wasn't running, and it wasn't
2233 * runnable, which means that it will never become
2234 * running in the future either. We're all done!
2235 */
2236 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 }
Roland McGrath85ba2d82008-07-25 19:45:58 -07002238
2239 return ncsw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240}
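/*
 * Worked example (added for illustration): the "ncsw = p->nvcsw |
 * LONG_MIN" above sets the MSB, so the snapshot is nonzero even when
 * p->nvcsw itself is 0, letting it double as a "state matched" flag.
 * A caller can therefore invoke wait_task_inactive() twice and compare
 * the two return values: equal nonzero results mean @p was never
 * scheduled in between.
 */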
2241
2242/***
2243 * kick_process - kick a running thread to enter/exit the kernel
2244 * @p: the to-be-kicked thread
2245 *
2246 * Cause a process which is running on another CPU to enter
2247 * kernel-mode, without any delay. (to get signals handled.)
2248 *
 2249 * NOTE: this function doesn't have to take the runqueue lock,
2250 * because all it wants to ensure is that the remote task enters
2251 * the kernel. If the IPI races and the task has been migrated
2252 * to another CPU then no harm is done and the purpose has been
2253 * achieved as well.
2254 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002255void kick_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256{
2257 int cpu;
2258
2259 preempt_disable();
2260 cpu = task_cpu(p);
2261 if ((cpu != smp_processor_id()) && task_curr(p))
2262 smp_send_reschedule(cpu);
2263 preempt_enable();
2264}
Rusty Russellb43e3522009-06-12 22:27:00 -06002265EXPORT_SYMBOL_GPL(kick_process);
Nick Piggin476d1392005-06-25 14:57:29 -07002266#endif /* CONFIG_SMP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
Thomas Gleixner0793a612008-12-04 20:12:29 +01002268/**
2269 * task_oncpu_function_call - call a function on the cpu on which a task runs
2270 * @p: the task to evaluate
2271 * @func: the function to be called
2272 * @info: the function call argument
2273 *
2274 * Calls the function @func when the task is currently running. This might
 2275 * be on the current CPU, in which case the function is called directly.
2276 */
2277void task_oncpu_function_call(struct task_struct *p,
2278 void (*func) (void *info), void *info)
2279{
2280 int cpu;
2281
2282 preempt_disable();
2283 cpu = task_cpu(p);
2284 if (task_curr(p))
2285 smp_call_function_single(cpu, func, info, 1);
2286 preempt_enable();
2287}
2288
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002289#ifdef CONFIG_SMP
Oleg Nesterov30da6882010-03-15 10:10:19 +01002290/*
2291 * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
2292 */
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002293static int select_fallback_rq(int cpu, struct task_struct *p)
2294{
2295 int dest_cpu;
2296 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2297
2298 /* Look for allowed, online CPU in same node. */
2299 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2300 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2301 return dest_cpu;
2302
2303 /* Any allowed, online CPU? */
2304 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2305 if (dest_cpu < nr_cpu_ids)
2306 return dest_cpu;
2307
2308 /* No more Mr. Nice Guy. */
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01002309 dest_cpu = cpuset_cpus_allowed_fallback(p);
2310 /*
2311 * Don't tell them about moving exiting tasks or
2312 * kernel threads (both mm NULL), since they never
2313 * leave kernel.
2314 */
2315 if (p->mm && printk_ratelimit()) {
2316 printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
2317 task_pid_nr(p), p->comm, cpu);
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002318 }
2319
2320 return dest_cpu;
2321}
2322
Peter Zijlstrae2912002009-12-16 18:04:36 +01002323/*
Oleg Nesterov30da6882010-03-15 10:10:19 +01002324 * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
Peter Zijlstrae2912002009-12-16 18:04:36 +01002325 */
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002326static inline
Peter Zijlstra0017d732010-03-24 18:34:10 +01002327int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002328{
Peter Zijlstra0017d732010-03-24 18:34:10 +01002329 int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002330
2331 /*
2332 * In order not to call set_task_cpu() on a blocking task we need
2333 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2334 * cpu.
2335 *
2336 * Since this is common to all placement strategies, this lives here.
2337 *
2338 * [ this allows ->select_task() to simply return task_cpu(p) and
2339 * not worry about this generic constraint ]
2340 */
2341 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
Peter Zijlstra70f11202009-12-20 17:36:27 +01002342 !cpu_online(cpu)))
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002343 cpu = select_fallback_rq(task_cpu(p), p);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002344
2345 return cpu;
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002346}
Mike Galbraith09a40af2010-04-15 07:29:59 +02002347
2348static void update_avg(u64 *avg, u64 sample)
2349{
2350 s64 diff = sample - *avg;
2351 *avg += diff >> 3;
2352}
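/*
 * Worked example (added for illustration): update_avg() is an
 * exponential moving average giving the new sample a 1/8 weight,
 * i.e. *avg = (7 * *avg + sample) / 8 using only a shift. With
 * *avg = 1000 and sample = 1800, diff = 800 and *avg becomes 1100.
 */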
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002353#endif
2354
Tejun Heo9ed38112009-12-03 15:08:03 +09002355static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
2356 bool is_sync, bool is_migrate, bool is_local,
2357 unsigned long en_flags)
2358{
2359 schedstat_inc(p, se.statistics.nr_wakeups);
2360 if (is_sync)
2361 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2362 if (is_migrate)
2363 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2364 if (is_local)
2365 schedstat_inc(p, se.statistics.nr_wakeups_local);
2366 else
2367 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2368
2369 activate_task(rq, p, en_flags);
2370}
2371
2372static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
2373 int wake_flags, bool success)
2374{
2375 trace_sched_wakeup(p, success);
2376 check_preempt_curr(rq, p, wake_flags);
2377
2378 p->state = TASK_RUNNING;
2379#ifdef CONFIG_SMP
2380 if (p->sched_class->task_woken)
2381 p->sched_class->task_woken(rq, p);
2382
2383 if (unlikely(rq->idle_stamp)) {
2384 u64 delta = rq->clock - rq->idle_stamp;
2385 u64 max = 2*sysctl_sched_migration_cost;
2386
2387 if (delta > max)
2388 rq->avg_idle = max;
2389 else
2390 update_avg(&rq->avg_idle, delta);
2391 rq->idle_stamp = 0;
2392 }
2393#endif
Tejun Heo21aa9af2010-06-08 21:40:37 +02002394 /* if a worker is waking up, notify workqueue */
2395 if ((p->flags & PF_WQ_WORKER) && success)
2396 wq_worker_waking_up(p, cpu_of(rq));
Tejun Heo9ed38112009-12-03 15:08:03 +09002397}
2398
2399/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 * try_to_wake_up - wake up a thread
Tejun Heo9ed38112009-12-03 15:08:03 +09002401 * @p: the thread to be awakened
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402 * @state: the mask of task states that can be woken
Tejun Heo9ed38112009-12-03 15:08:03 +09002403 * @wake_flags: wake modifier flags (WF_*)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 *
2405 * Put it on the run-queue if it's not already there. The "current"
2406 * thread is always on the run-queue (except when the actual
2407 * re-schedule is in progress), and as such you're allowed to do
2408 * the simpler "current->state = TASK_RUNNING" to mark yourself
2409 * runnable without the overhead of this.
2410 *
Tejun Heo9ed38112009-12-03 15:08:03 +09002411 * Returns %true if @p was woken up, %false if it was already running
2412 * or @state didn't match @p's state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 */
Peter Zijlstra7d478722009-09-14 19:55:44 +02002414static int try_to_wake_up(struct task_struct *p, unsigned int state,
2415 int wake_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416{
Ingo Molnarcc367732007-10-15 17:00:18 +02002417 int cpu, orig_cpu, this_cpu, success = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 unsigned long flags;
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002419 unsigned long en_flags = ENQUEUE_WAKEUP;
Dan Carpenterab3b3aa2010-03-06 14:17:52 +03002420 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002422 this_cpu = get_cpu();
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02002423
Linus Torvalds04e2f172008-02-23 18:05:03 -08002424 smp_wmb();
Dan Carpenterab3b3aa2010-03-06 14:17:52 +03002425 rq = task_rq_lock(p, &flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002426 if (!(p->state & state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 goto out;
2428
Ingo Molnardd41f592007-07-09 18:51:59 +02002429 if (p->se.on_rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430 goto out_running;
2431
2432 cpu = task_cpu(p);
Ingo Molnarcc367732007-10-15 17:00:18 +02002433 orig_cpu = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434
2435#ifdef CONFIG_SMP
2436 if (unlikely(task_running(rq, p)))
2437 goto out_activate;
2438
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002439 /*
2440 * In order to handle concurrent wakeups and release the rq->lock
2441 * we put the task in TASK_WAKING state.
Ingo Molnareb24073b2009-09-16 21:09:13 +02002442 *
2443 * First fix up the nr_uninterruptible count:
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002444 */
Peter Zijlstracc87f762010-03-26 12:22:14 +01002445 if (task_contributes_to_load(p)) {
2446 if (likely(cpu_online(orig_cpu)))
2447 rq->nr_uninterruptible--;
2448 else
2449 this_rq()->nr_uninterruptible--;
2450 }
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002451 p->state = TASK_WAKING;
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002452
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002453 if (p->sched_class->task_waking) {
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002454 p->sched_class->task_waking(rq, p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002455 en_flags |= ENQUEUE_WAKING;
Peter Zijlstra0970d292010-02-15 14:45:54 +01002456 }
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002457
Peter Zijlstra0017d732010-03-24 18:34:10 +01002458 cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
2459 if (cpu != orig_cpu)
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002460 set_task_cpu(p, cpu);
Peter Zijlstra0017d732010-03-24 18:34:10 +01002461 __task_rq_unlock(rq);
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002462
Peter Zijlstra0970d292010-02-15 14:45:54 +01002463 rq = cpu_rq(cpu);
2464 raw_spin_lock(&rq->lock);
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002465
Peter Zijlstra0970d292010-02-15 14:45:54 +01002466 /*
2467 * We migrated the task without holding either rq->lock, however
2468 * since the task is not on the task list itself, nobody else
2469 * will try and migrate the task, hence the rq should match the
2470 * cpu we just moved it to.
2471 */
2472 WARN_ON(task_cpu(p) != cpu);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002473 WARN_ON(p->state != TASK_WAKING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474
Gregory Haskinse7693a32008-01-25 21:08:09 +01002475#ifdef CONFIG_SCHEDSTATS
2476 schedstat_inc(rq, ttwu_count);
2477 if (cpu == this_cpu)
2478 schedstat_inc(rq, ttwu_local);
2479 else {
2480 struct sched_domain *sd;
2481 for_each_domain(this_cpu, sd) {
Rusty Russell758b2cd2008-11-25 02:35:04 +10302482 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Gregory Haskinse7693a32008-01-25 21:08:09 +01002483 schedstat_inc(sd, ttwu_wake_remote);
2484 break;
2485 }
2486 }
2487 }
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002488#endif /* CONFIG_SCHEDSTATS */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002489
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490out_activate:
2491#endif /* CONFIG_SMP */
Tejun Heo9ed38112009-12-03 15:08:03 +09002492 ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
2493 cpu == this_cpu, en_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 success = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495out_running:
Tejun Heo9ed38112009-12-03 15:08:03 +09002496 ttwu_post_activation(p, rq, wake_flags, success);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497out:
2498 task_rq_unlock(rq, &flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002499 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500
2501 return success;
2502}
2503
David Howells50fa6102009-04-28 15:01:38 +01002504/**
Tejun Heo21aa9af2010-06-08 21:40:37 +02002505 * try_to_wake_up_local - try to wake up a local task with rq lock held
2506 * @p: the thread to be awakened
2507 *
 2508 * Put @p on the run-queue if it's not already there. The caller must
2509 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2510 * the current task. this_rq() stays locked over invocation.
2511 */
2512static void try_to_wake_up_local(struct task_struct *p)
2513{
2514 struct rq *rq = task_rq(p);
2515 bool success = false;
2516
2517 BUG_ON(rq != this_rq());
2518 BUG_ON(p == current);
2519 lockdep_assert_held(&rq->lock);
2520
2521 if (!(p->state & TASK_NORMAL))
2522 return;
2523
2524 if (!p->se.on_rq) {
2525 if (likely(!task_running(rq, p))) {
2526 schedstat_inc(rq, ttwu_count);
2527 schedstat_inc(rq, ttwu_local);
2528 }
2529 ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
2530 success = true;
2531 }
2532 ttwu_post_activation(p, rq, 0, success);
2533}
2534
2535/**
David Howells50fa6102009-04-28 15:01:38 +01002536 * wake_up_process - Wake up a specific process
2537 * @p: The process to be woken up.
2538 *
2539 * Attempt to wake up the nominated process and move it to the set of runnable
2540 * processes. Returns 1 if the process was woken up, 0 if it was already
2541 * running.
2542 *
2543 * It may be assumed that this function implies a write memory barrier before
2544 * changing the task state if and only if any tasks are woken up.
2545 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002546int wake_up_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547{
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05002548 return try_to_wake_up(p, TASK_ALL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550EXPORT_SYMBOL(wake_up_process);
2551
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002552int wake_up_state(struct task_struct *p, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553{
2554 return try_to_wake_up(p, state, 0);
2555}
2556
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557/*
2558 * Perform scheduler related setup for a newly forked process p.
2559 * p is forked by current.
Ingo Molnardd41f592007-07-09 18:51:59 +02002560 *
2561 * __sched_fork() is basic setup used by init_idle() too:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002563static void __sched_fork(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564{
Ingo Molnardd41f592007-07-09 18:51:59 +02002565 p->se.exec_start = 0;
2566 p->se.sum_exec_runtime = 0;
Ingo Molnarf6cf8912007-08-28 12:53:24 +02002567 p->se.prev_sum_exec_runtime = 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002568 p->se.nr_migrations = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002569
2570#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03002571 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002572#endif
Nick Piggin476d1392005-06-25 14:57:29 -07002573
Peter Zijlstrafa717062008-01-25 21:08:27 +01002574 INIT_LIST_HEAD(&p->rt.run_list);
Ingo Molnardd41f592007-07-09 18:51:59 +02002575 p->se.on_rq = 0;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02002576 INIT_LIST_HEAD(&p->se.group_node);
Nick Piggin476d1392005-06-25 14:57:29 -07002577
Avi Kivitye107be32007-07-26 13:40:43 +02002578#ifdef CONFIG_PREEMPT_NOTIFIERS
2579 INIT_HLIST_HEAD(&p->preempt_notifiers);
2580#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002581}
2582
2583/*
2584 * fork()/clone()-time setup:
2585 */
2586void sched_fork(struct task_struct *p, int clone_flags)
2587{
2588 int cpu = get_cpu();
2589
2590 __sched_fork(p);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002591 /*
Peter Zijlstra0017d732010-03-24 18:34:10 +01002592 * We mark the process TASK_RUNNING here, even though it is not on a
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002593 * runqueue yet. This guarantees that nobody will actually run it, and
2594 * that a signal or other external event cannot wake it up and insert it on the runqueue either.
2595 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01002596 p->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02002597
Ingo Molnarb29739f2006-06-27 02:54:51 -07002598 /*
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002599 * Revert to default priority/policy on fork if requested.
2600 */
2601 if (unlikely(p->sched_reset_on_fork)) {
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002602 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002603 p->policy = SCHED_NORMAL;
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002604 p->normal_prio = p->static_prio;
2605 }
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002606
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002607 if (PRIO_TO_NICE(p->static_prio) < 0) {
2608 p->static_prio = NICE_TO_PRIO(0);
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002609 p->normal_prio = p->static_prio;
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002610 set_load_weight(p);
2611 }
2612
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002613 /*
2614 * We don't need the reset flag anymore after the fork. It has
2615 * fulfilled its duty:
2616 */
2617 p->sched_reset_on_fork = 0;
2618 }
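	/*
	 * For reference, userspace opts into this reset behaviour by
	 * OR-ing SCHED_RESET_ON_FORK into the policy argument; a sketch
	 * (hypothetical priority, error handling omitted):
	 *
	 *	struct sched_param param = { .sched_priority = 50 };
	 *
	 *	sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &param);
	 *
	 * Children of such a task then start as SCHED_NORMAL (and at
	 * nice 0 if the parent's nice was negative), per the resets
	 * above, while the parent keeps SCHED_FIFO.
	 */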
Lennart Poetteringca94c442009-06-15 17:17:47 +02002619
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002620 /*
2621 * Make sure we do not leak PI boosting priority to the child.
2622 */
2623 p->prio = current->normal_prio;
2624
Hiroshi Shimamoto2ddbf952007-10-15 17:00:11 +02002625 if (!rt_prio(p->prio))
2626 p->sched_class = &fair_sched_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07002627
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002628 if (p->sched_class->task_fork)
2629 p->sched_class->task_fork(p);
2630
Peter Zijlstra86951592010-06-22 11:44:53 +02002631 /*
2632 * The child is not yet in the pid-hash so no cgroup attach races,
2633 * and the cgroup is pinned to this child because cgroup_fork()
2634 * runs before sched_fork().
2635 *
2636 * Silence PROVE_RCU.
2637 */
2638 rcu_read_lock();
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002639 set_task_cpu(p, cpu);
Peter Zijlstra86951592010-06-22 11:44:53 +02002640 rcu_read_unlock();
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002641
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002642#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
Ingo Molnardd41f592007-07-09 18:51:59 +02002643 if (likely(sched_info_on()))
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002644 memset(&p->sched_info, 0, sizeof(p->sched_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645#endif
Chen, Kenneth Wd6077cb2006-02-14 13:53:10 -08002646#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
Nick Piggin4866cde2005-06-25 14:57:23 -07002647 p->oncpu = 0;
2648#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649#ifdef CONFIG_PREEMPT
Nick Piggin4866cde2005-06-25 14:57:23 -07002650 /* Want to start with kernel preemption disabled. */
Al Viroa1261f52005-11-13 16:06:55 -08002651 task_thread_info(p)->preempt_count = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652#endif
Dario Faggioli806c09a2010-11-30 19:51:33 +01002653#ifdef CONFIG_SMP
Gregory Haskins917b6272008-12-29 09:39:53 -05002654 plist_node_init(&p->pushable_tasks, MAX_PRIO);
Dario Faggioli806c09a2010-11-30 19:51:33 +01002655#endif
Gregory Haskins917b6272008-12-29 09:39:53 -05002656
Nick Piggin476d1392005-06-25 14:57:29 -07002657 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658}
2659
2660/*
2661 * wake_up_new_task - wake up a newly created task for the first time.
2662 *
2663 * This function will do some initial scheduler statistics housekeeping
2664 * that must be done for every newly created context, then puts the task
2665 * on the runqueue and wakes it.
2666 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002667void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668{
2669 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002670 struct rq *rq;
Andrew Mortonc8906922010-03-11 14:08:43 -08002671 int cpu __maybe_unused = get_cpu();
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002672
2673#ifdef CONFIG_SMP
Peter Zijlstra0017d732010-03-24 18:34:10 +01002674 rq = task_rq_lock(p, &flags);
2675 p->state = TASK_WAKING;
2676
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002677 /*
2678 * Fork balancing, do it here and not earlier because:
2679 * - cpus_allowed can change in the fork path
2680 * - any previously selected cpu might disappear through hotplug
2681 *
Peter Zijlstra0017d732010-03-24 18:34:10 +01002682 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
2683 * without people poking at ->cpus_allowed.
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002684 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01002685 cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002686 set_task_cpu(p, cpu);
Peter Zijlstra0017d732010-03-24 18:34:10 +01002687
2688 p->state = TASK_RUNNING;
2689 task_rq_unlock(rq, &flags);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002690#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691
Peter Zijlstra0017d732010-03-24 18:34:10 +01002692 rq = task_rq_lock(p, &flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002693 activate_task(rq, p, 0);
Peter Zijlstra27a9da62010-05-04 20:36:56 +02002694 trace_sched_wakeup_new(p, 1);
Peter Zijlstraa7558e02009-09-14 20:02:34 +02002695 check_preempt_curr(rq, p, WF_FORK);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002696#ifdef CONFIG_SMP
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002697 if (p->sched_class->task_woken)
2698 p->sched_class->task_woken(rq, p);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002699#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002700 task_rq_unlock(rq, &flags);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002701 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702}
2703
Avi Kivitye107be32007-07-26 13:40:43 +02002704#ifdef CONFIG_PREEMPT_NOTIFIERS
2705
2706/**
Luis Henriques80dd99b2009-03-16 19:58:09 +00002707 * preempt_notifier_register - tell me when current is being preempted & rescheduled
Randy Dunlap421cee22007-07-31 00:37:50 -07002708 * @notifier: notifier struct to register
Avi Kivitye107be32007-07-26 13:40:43 +02002709 */
2710void preempt_notifier_register(struct preempt_notifier *notifier)
2711{
2712 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2713}
2714EXPORT_SYMBOL_GPL(preempt_notifier_register);
2715
2716/**
2717 * preempt_notifier_unregister - no longer interested in preemption notifications
Randy Dunlap421cee22007-07-31 00:37:50 -07002718 * @notifier: notifier struct to unregister
Avi Kivitye107be32007-07-26 13:40:43 +02002719 *
2720 * This is safe to call from within a preemption notifier.
2721 */
2722void preempt_notifier_unregister(struct preempt_notifier *notifier)
2723{
2724 hlist_del(&notifier->link);
2725}
2726EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
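/*
 * A usage sketch, assuming the preempt_notifier_init() helper from
 * <linux/preempt.h>; the my_* names are hypothetical:
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu) { ... }
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next) { ... }
 *
 *	static struct preempt_notifier_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	struct preempt_notifier notifier;
 *
 *	preempt_notifier_init(&notifier, &my_ops);
 *	preempt_notifier_register(&notifier);	// affects current only
 *
 * KVM is the classic user, lazily saving/restoring guest state around
 * preemption of the vcpu thread.
 */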
2727
2728static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2729{
2730 struct preempt_notifier *notifier;
2731 struct hlist_node *node;
2732
2733 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2734 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2735}
2736
2737static void
2738fire_sched_out_preempt_notifiers(struct task_struct *curr,
2739 struct task_struct *next)
2740{
2741 struct preempt_notifier *notifier;
2742 struct hlist_node *node;
2743
2744 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2745 notifier->ops->sched_out(notifier, next);
2746}
2747
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002748#else /* !CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002749
2750static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2751{
2752}
2753
2754static void
2755fire_sched_out_preempt_notifiers(struct task_struct *curr,
2756 struct task_struct *next)
2757{
2758}
2759
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002760#endif /* CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002761
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762/**
Nick Piggin4866cde2005-06-25 14:57:23 -07002763 * prepare_task_switch - prepare to switch tasks
2764 * @rq: the runqueue preparing to switch
Randy Dunlap421cee22007-07-31 00:37:50 -07002765 * @prev: the current task that is being switched out
Nick Piggin4866cde2005-06-25 14:57:23 -07002766 * @next: the task we are going to switch to.
2767 *
2768 * This is called with the rq lock held and interrupts off. It must
2769 * be paired with a subsequent finish_task_switch after the context
2770 * switch.
2771 *
2772 * prepare_task_switch sets up locking and calls architecture specific
2773 * hooks.
2774 */
Avi Kivitye107be32007-07-26 13:40:43 +02002775static inline void
2776prepare_task_switch(struct rq *rq, struct task_struct *prev,
2777 struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -07002778{
Avi Kivitye107be32007-07-26 13:40:43 +02002779 fire_sched_out_preempt_notifiers(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07002780 prepare_lock_switch(rq, next);
2781 prepare_arch_switch(next);
2782}
2783
2784/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785 * finish_task_switch - clean up after a task-switch
Jeff Garzik344baba2005-09-07 01:15:17 -04002786 * @rq: runqueue associated with task-switch
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 * @prev: the thread we just switched away from.
2788 *
Nick Piggin4866cde2005-06-25 14:57:23 -07002789 * finish_task_switch must be called after the context switch, paired
2790 * with a prepare_task_switch call before the context switch.
2791 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2792 * and do any other architecture-specific cleanup actions.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 *
2794 * Note that we may have delayed dropping an mm in context_switch(). If
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01002795 * so, we finish that here outside of the runqueue lock. (Doing it
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 * with the lock held can cause deadlocks; see schedule() for
2797 * details.)
2798 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02002799static void finish_task_switch(struct rq *rq, struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 __releases(rq->lock)
2801{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 struct mm_struct *mm = rq->prev_mm;
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002803 long prev_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
2805 rq->prev_mm = NULL;
2806
2807 /*
2808 * A task struct has one reference for the use as "current".
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002809 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002810 * schedule one last time. The schedule call will never return, and
2811 * the scheduled task must drop that reference.
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002812 * The test for TASK_DEAD must occur while the runqueue locks are
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 * still held, otherwise prev could be scheduled on another cpu, die
2814 * there before we look at prev->state, and then the reference would
2815 * be dropped twice.
2816 * Manfred Spraul <manfred@colorfullife.com>
2817 */
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002818 prev_state = prev->state;
Nick Piggin4866cde2005-06-25 14:57:23 -07002819 finish_arch_switch(prev);
Jamie Iles8381f652010-01-08 15:27:33 +00002820#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2821 local_irq_disable();
2822#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Peter Zijlstra49f47432009-12-27 11:51:52 +01002823 perf_event_task_sched_in(current);
Jamie Iles8381f652010-01-08 15:27:33 +00002824#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2825 local_irq_enable();
2826#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Nick Piggin4866cde2005-06-25 14:57:23 -07002827 finish_lock_switch(rq, prev);
Steven Rostedte8fa1362008-01-25 21:08:05 +01002828
Avi Kivitye107be32007-07-26 13:40:43 +02002829 fire_sched_in_preempt_notifiers(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 if (mm)
2831 mmdrop(mm);
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002832 if (unlikely(prev_state == TASK_DEAD)) {
bibo maoc6fd91f2006-03-26 01:38:20 -08002833 /*
2834 * Remove function-return probe instances associated with this
2835 * task and put them back on the free list.
Ingo Molnar9761eea2007-07-09 18:52:00 +02002836 */
bibo maoc6fd91f2006-03-26 01:38:20 -08002837 kprobe_flush_task(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 put_task_struct(prev);
bibo maoc6fd91f2006-03-26 01:38:20 -08002839 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840}
2841
Gregory Haskins3f029d32009-07-29 11:08:47 -04002842#ifdef CONFIG_SMP
2843
2844/* assumes rq->lock is held */
2845static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2846{
2847 if (prev->sched_class->pre_schedule)
2848 prev->sched_class->pre_schedule(rq, prev);
2849}
2850
2851/* rq->lock is NOT held, but preemption is disabled */
2852static inline void post_schedule(struct rq *rq)
2853{
2854 if (rq->post_schedule) {
2855 unsigned long flags;
2856
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002857 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002858 if (rq->curr->sched_class->post_schedule)
2859 rq->curr->sched_class->post_schedule(rq);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002860 raw_spin_unlock_irqrestore(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002861
2862 rq->post_schedule = 0;
2863 }
2864}
2865
2866#else
2867
2868static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2869{
2870}
2871
2872static inline void post_schedule(struct rq *rq)
2873{
2874}
2875
2876#endif
2877
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878/**
2879 * schedule_tail - first thing a freshly forked thread must call.
2880 * @prev: the thread we just switched away from.
2881 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002882asmlinkage void schedule_tail(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 __releases(rq->lock)
2884{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002885 struct rq *rq = this_rq();
2886
Nick Piggin4866cde2005-06-25 14:57:23 -07002887 finish_task_switch(rq, prev);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002888
Gregory Haskins3f029d32009-07-29 11:08:47 -04002889 /*
2890 * FIXME: do we need to worry about rq being invalidated by the
2891 * task_switch?
2892 */
2893 post_schedule(rq);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002894
Nick Piggin4866cde2005-06-25 14:57:23 -07002895#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2896 /* In this case, finish_task_switch does not reenable preemption */
2897 preempt_enable();
2898#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 if (current->set_child_tid)
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002900 put_user(task_pid_vnr(current), current->set_child_tid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901}
2902
2903/*
2904 * context_switch - switch to the new MM and the new
2905 * thread's register state.
2906 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002907static inline void
Ingo Molnar70b97a72006-07-03 00:25:42 -07002908context_switch(struct rq *rq, struct task_struct *prev,
Ingo Molnar36c8b582006-07-03 00:25:41 -07002909 struct task_struct *next)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910{
Ingo Molnardd41f592007-07-09 18:51:59 +02002911 struct mm_struct *mm, *oldmm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912
Avi Kivitye107be32007-07-26 13:40:43 +02002913 prepare_task_switch(rq, prev, next);
Peter Zijlstra27a9da62010-05-04 20:36:56 +02002914 trace_sched_switch(prev, next);
Ingo Molnardd41f592007-07-09 18:51:59 +02002915 mm = next->mm;
2916 oldmm = prev->active_mm;
Zachary Amsden9226d122007-02-13 13:26:21 +01002917 /*
2918 * For paravirt, this is coupled with an exit in switch_to to
2919 * combine the page table reload and the switch backend into
2920 * one hypercall.
2921 */
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -08002922 arch_start_context_switch(prev);
Zachary Amsden9226d122007-02-13 13:26:21 +01002923
Heiko Carstens31915ab2010-09-16 14:42:25 +02002924 if (!mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 next->active_mm = oldmm;
2926 atomic_inc(&oldmm->mm_count);
2927 enter_lazy_tlb(oldmm, next);
2928 } else
2929 switch_mm(oldmm, mm, next);
2930
Heiko Carstens31915ab2010-09-16 14:42:25 +02002931 if (!prev->mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 prev->active_mm = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 rq->prev_mm = oldmm;
2934 }
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07002935 /*
2936	 * The runqueue lock will be released by the next task (which is
2937	 * an invalid locking op, but in the case of the scheduler it's
2938	 * an obvious special case), so we do an early lockdep release
2939	 * here:
2940 */
2941#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07002942 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07002943#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944
2945 /* Here we just switch the register state and the stack. */
2946 switch_to(prev, next, prev);
2947
Ingo Molnardd41f592007-07-09 18:51:59 +02002948 barrier();
2949 /*
2950 * this_rq must be evaluated again because prev may have moved
2951 * CPUs since it called schedule(), thus the 'rq' on its stack
2952 * frame will be invalid.
2953 */
2954 finish_task_switch(this_rq(), prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955}
2956
2957/*
2958 * nr_running, nr_uninterruptible and nr_context_switches:
2959 *
2960 * externally visible scheduler statistics: current number of runnable
2961 * threads, current number of uninterruptible-sleeping threads, total
2962 * number of context switches performed since bootup.
2963 */
2964unsigned long nr_running(void)
2965{
2966 unsigned long i, sum = 0;
2967
2968 for_each_online_cpu(i)
2969 sum += cpu_rq(i)->nr_running;
2970
2971 return sum;
2972}
2973
2974unsigned long nr_uninterruptible(void)
2975{
2976 unsigned long i, sum = 0;
2977
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002978 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 sum += cpu_rq(i)->nr_uninterruptible;
2980
2981 /*
2982 * Since we read the counters lockless, it might be slightly
2983 * inaccurate. Do not allow it to go below zero though:
2984 */
2985 if (unlikely((long)sum < 0))
2986 sum = 0;
2987
2988 return sum;
2989}
2990
2991unsigned long long nr_context_switches(void)
2992{
Steven Rostedtcc94abf2006-06-27 02:54:31 -07002993 int i;
2994 unsigned long long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08002996 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997 sum += cpu_rq(i)->nr_switches;
2998
2999 return sum;
3000}
3001
3002unsigned long nr_iowait(void)
3003{
3004 unsigned long i, sum = 0;
3005
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003006 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007 sum += atomic_read(&cpu_rq(i)->nr_iowait);
3008
3009 return sum;
3010}
3011
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003012unsigned long nr_iowait_cpu(int cpu)
Arjan van de Ven69d25872009-09-21 17:04:08 -07003013{
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003014 struct rq *this = cpu_rq(cpu);
Arjan van de Ven69d25872009-09-21 17:04:08 -07003015 return atomic_read(&this->nr_iowait);
3016}
3017
3018unsigned long this_cpu_load(void)
3019{
3020 struct rq *this = this_rq();
3021 return this->cpu_load[0];
3022}
3023
3024
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003025/* Variables and functions for calc_load */
3026static atomic_long_t calc_load_tasks;
3027static unsigned long calc_load_update;
3028unsigned long avenrun[3];
3029EXPORT_SYMBOL(avenrun);
3030
Peter Zijlstra74f51872010-04-22 21:50:19 +02003031static long calc_load_fold_active(struct rq *this_rq)
3032{
3033 long nr_active, delta = 0;
3034
3035 nr_active = this_rq->nr_running;
3036 nr_active += (long) this_rq->nr_uninterruptible;
3037
3038 if (nr_active != this_rq->calc_load_active) {
3039 delta = nr_active - this_rq->calc_load_active;
3040 this_rq->calc_load_active = nr_active;
3041 }
3042
3043 return delta;
3044}
3045
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003046static unsigned long
3047calc_load(unsigned long load, unsigned long exp, unsigned long active)
3048{
3049 load *= exp;
3050 load += active * (FIXED_1 - exp);
3051 load += 1UL << (FSHIFT - 1);
3052 return load >> FSHIFT;
3053}
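/*
 * Worked example (assuming the usual FSHIFT = 11, FIXED_1 = 2048 and
 * EXP_1 = 1884 from <linux/sched.h>): starting from avenrun[0] == 0
 * with one runnable task, active = 1 * FIXED_1 = 2048 and
 *
 *	load = (0 * 1884 + 2048 * (2048 - 1884) + 1024) >> 11
 *	     = (2048 * 164 + 1024) >> 11
 *	     = 164
 *
 * i.e. the 1-minute average steps from 0.00 to 164/2048 ~= 0.08 on the
 * first LOAD_FREQ update, converging geometrically towards 1.00.
 */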
3054
Peter Zijlstra74f51872010-04-22 21:50:19 +02003055#ifdef CONFIG_NO_HZ
3056/*
3057 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
3058 *
3059 * When making the ILB scale, we should try to pull this in as well.
3060 */
3061static atomic_long_t calc_load_tasks_idle;
3062
3063static void calc_load_account_idle(struct rq *this_rq)
3064{
3065 long delta;
3066
3067 delta = calc_load_fold_active(this_rq);
3068 if (delta)
3069 atomic_long_add(delta, &calc_load_tasks_idle);
3070}
3071
3072static long calc_load_fold_idle(void)
3073{
3074 long delta = 0;
3075
3076 /*
3077	 * It's racy, but we don't care: a delta we miss here just gets folded in on the next pass.
3078 */
3079 if (atomic_long_read(&calc_load_tasks_idle))
3080 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3081
3082 return delta;
3083}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003084
3085/**
3086 * fixed_power_int - compute: x^n, in O(log n) time
3087 *
3088 * @x: base of the power
3089 * @frac_bits: fractional bits of @x
3090 * @n: power to raise @x to.
3091 *
3092 * By exploiting the relation between the definition of the natural power
3093 * function: x^n := x*x*...*x (x multiplied by itself n times), and
3094 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
3095 * (where: n_i \elem {0, 1}, the binary vector representing n),
3096 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
3097 * of course trivially computable in O(log_2 n), the length of our binary
3098 * vector.
3099 */
3100static unsigned long
3101fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
3102{
3103 unsigned long result = 1UL << frac_bits;
3104
3105 if (n) for (;;) {
3106 if (n & 1) {
3107 result *= x;
3108 result += 1UL << (frac_bits - 1);
3109 result >>= frac_bits;
3110 }
3111 n >>= 1;
3112 if (!n)
3113 break;
3114 x *= x;
3115 x += 1UL << (frac_bits - 1);
3116 x >>= frac_bits;
3117 }
3118
3119 return result;
3120}
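/*
 * Worked example: for n = 16 (binary 10000) the loop squares x four
 * times (x^2, x^4, x^8, x^16) and multiplies the final square into
 * result when the single set bit is reached -- 5 fixed-point multiplies
 * instead of the 15 a naive x*x*...*x loop would need, with rounding
 * (the 1UL << (frac_bits - 1) term) applied after every multiply.
 */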
3121
3122/*
3123 * a1 = a0 * e + a * (1 - e)
3124 *
3125 * a2 = a1 * e + a * (1 - e)
3126 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
3127 * = a0 * e^2 + a * (1 - e) * (1 + e)
3128 *
3129 * a3 = a2 * e + a * (1 - e)
3130 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
3131 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
3132 *
3133 * ...
3134 *
3135 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^(n-1)) [1]
3136 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
3137 * = a0 * e^n + a * (1 - e^n)
3138 *
3139 * [1] application of the geometric series:
3140 *
3141 * n 1 - x^(n+1)
3142 * S_n := \Sum x^i = -------------
3143 * i=0 1 - x
3144 */
3145static unsigned long
3146calc_load_n(unsigned long load, unsigned long exp,
3147 unsigned long active, unsigned int n)
3148{
3149
3150 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
3151}
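/*
 * Sanity check, assuming FIXED_1 = 2048 and EXP_1 = 1884: for n = 2,
 * fixed_power_int(1884, 11, 2) = (1884 * 1884 + 1024) >> 11 = 1733,
 * so two missed cycles decay avenrun[0] by 1733/2048 ~= 0.846 -- the
 * same factor two back-to-back calc_load() calls would apply.
 */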
3152
3153/*
3154 * NO_HZ can leave us missing all per-cpu ticks calling
3155 * calc_load_account_active(), but since an idle CPU folds its delta into
3156 * calc_load_tasks_idle via calc_load_account_idle(), all we need to do is fold
3157 * in the pending idle delta if our idle period crossed a load cycle boundary.
3158 *
3159 * Once we've updated the global active value, we need to apply the exponential
3160 * weights adjusted to the number of cycles missed.
3161 */
3162static void calc_global_nohz(unsigned long ticks)
3163{
3164 long delta, active, n;
3165
3166 if (time_before(jiffies, calc_load_update))
3167 return;
3168
3169 /*
3170 * If we crossed a calc_load_update boundary, make sure to fold
3171	 * any pending idle changes; the respective CPUs might have
3172 * missed the tick driven calc_load_account_active() update
3173 * due to NO_HZ.
3174 */
3175 delta = calc_load_fold_idle();
3176 if (delta)
3177 atomic_long_add(delta, &calc_load_tasks);
3178
3179 /*
3180 * If we were idle for multiple load cycles, apply them.
3181 */
3182 if (ticks >= LOAD_FREQ) {
3183 n = ticks / LOAD_FREQ;
3184
3185 active = atomic_long_read(&calc_load_tasks);
3186 active = active > 0 ? active * FIXED_1 : 0;
3187
3188 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
3189 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
3190 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
3191
3192 calc_load_update += n * LOAD_FREQ;
3193 }
3194
3195 /*
3196	 * It's possible the remainder of the above division also crosses
3197 * a LOAD_FREQ period, the regular check in calc_global_load()
3198 * which comes after this will take care of that.
3199 *
3200 * Consider us being 11 ticks before a cycle completion, and us
3201 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
3202 * age us 4 cycles, and the test in calc_global_load() will
3203 * pick up the final one.
3204 */
3205}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003206#else
3207static void calc_load_account_idle(struct rq *this_rq)
3208{
3209}
3210
3211static inline long calc_load_fold_idle(void)
3212{
3213 return 0;
3214}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003215
3216static void calc_global_nohz(unsigned long ticks)
3217{
3218}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003219#endif
3220
Thomas Gleixner2d024942009-05-02 20:08:52 +02003221/**
3222 * get_avenrun - get the load average array
3223 * @loads: pointer to dest load array
3224 * @offset: offset to add
3225 * @shift: shift count to shift the result left
3226 *
3227 * These values are estimates at best, so no need for locking.
3228 */
3229void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3230{
3231 loads[0] = (avenrun[0] + offset) << shift;
3232 loads[1] = (avenrun[1] + offset) << shift;
3233 loads[2] = (avenrun[2] + offset) << shift;
3234}
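/*
 * Typical use, as in fs/proc/loadavg.c: read the fixed-point averages
 * with a rounding offset and split them for display (LOAD_INT and
 * LOAD_FRAC come from <linux/sched.h>):
 *
 *	unsigned long avnrun[3];
 *
 *	get_avenrun(avnrun, FIXED_1/200, 0);
 *	seq_printf(m, "%lu.%02lu ...", LOAD_INT(avnrun[0]),
 *		   LOAD_FRAC(avnrun[0]));
 *
 * FIXED_1/200 rounds to the nearest hundredth before the integer and
 * fractional parts are extracted.
 */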
3235
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003236/*
3237 * calc_global_load - update the avenrun load estimates 10 ticks after
3238 * the CPUs have updated calc_load_tasks.
3239 */
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003240void calc_global_load(unsigned long ticks)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003241{
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003242 long active;
3243
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003244 calc_global_nohz(ticks);
3245
3246 if (time_before(jiffies, calc_load_update + 10))
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003247 return;
3248
3249 active = atomic_long_read(&calc_load_tasks);
3250 active = active > 0 ? active * FIXED_1 : 0;
3251
3252 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3253 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3254 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
3255
3256 calc_load_update += LOAD_FREQ;
3257}
3258
3259/*
Peter Zijlstra74f51872010-04-22 21:50:19 +02003260 * Called from update_cpu_load() to periodically update this CPU's
3261 * active count.
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003262 */
3263static void calc_load_account_active(struct rq *this_rq)
3264{
Peter Zijlstra74f51872010-04-22 21:50:19 +02003265 long delta;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003266
Peter Zijlstra74f51872010-04-22 21:50:19 +02003267 if (time_before(jiffies, this_rq->calc_load_update))
3268 return;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003269
Peter Zijlstra74f51872010-04-22 21:50:19 +02003270 delta = calc_load_fold_active(this_rq);
3271 delta += calc_load_fold_idle();
3272 if (delta)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003273 atomic_long_add(delta, &calc_load_tasks);
Peter Zijlstra74f51872010-04-22 21:50:19 +02003274
3275 this_rq->calc_load_update += LOAD_FREQ;
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08003276}
3277
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278/*
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003279 * The exact cpuload at various idx values, calculated at every tick would be
3280 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3281 *
3282 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
3283 * on nth tick when cpu may be busy, then we have:
3284 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3285 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3286 *
3287 * decay_load_missed() below does efficient calculation of
3288 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3289 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3290 *
3291 * The calculation is approximated on a 128 point scale.
3292 * degrade_zero_ticks is the number of ticks after which load at any
3293 * particular idx is approximated to be zero.
3294 * degrade_factor is a precomputed table, a row for each load idx.
3295 * Each column corresponds to degradation factor for a power of two ticks,
3296 * based on 128 point scale.
3297 * Example:
3298 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3299 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3300 *
3301 * With these power-of-2 load factors, we can degrade the load n times
3302 * by looking at 1 bits in n and doing as many mult/shift instead of
3303 * n mult/shifts needed by the exact degradation.
3304 */
3305#define DEGRADE_SHIFT 7
3306static const unsigned char
3307 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3308static const unsigned char
3309 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3310 {0, 0, 0, 0, 0, 0, 0, 0},
3311 {64, 32, 8, 0, 0, 0, 0, 0},
3312 {96, 72, 40, 12, 1, 0, 0},
3313 {112, 98, 75, 43, 15, 1, 0},
3314 {120, 112, 98, 76, 45, 16, 2} };
3315
3316/*
3317 * Update cpu_load for any missed ticks due to tickless idle. The missed
3318 * ticks can only have occurred while the CPU was idle, so we just decay
3319 * the old load without adding any new load.
3320 */
3321static unsigned long
3322decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3323{
3324 int j = 0;
3325
3326 if (!missed_updates)
3327 return load;
3328
3329 if (missed_updates >= degrade_zero_ticks[idx])
3330 return 0;
3331
3332 if (idx == 1)
3333 return load >> missed_updates;
3334
3335 while (missed_updates) {
3336 if (missed_updates % 2)
3337 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3338
3339 missed_updates >>= 1;
3340 j++;
3341 }
3342 return load;
3343}
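/*
 * Worked example: missed_updates = 10 (binary 1010) at idx = 2 walks
 * bits 1 and 3 of the table row {96, 72, 40, 12, ...}, so
 *
 *	load = (load * 72) >> 7;	// 2 ticks of decay, 72/128
 *	load = (load * 12) >> 7;	// 8 more ticks, 12/128
 *
 * i.e. ~0.053 * load, a close fit to the exact (3/4)^10 ~= 0.056.
 */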
3344
3345/*
Ingo Molnardd41f592007-07-09 18:51:59 +02003346 * Update rq->cpu_load[] statistics. This function is usually called every
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003347 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3348 * every tick. We fix it up based on jiffies.
Ingo Molnar48f24c42006-07-03 00:25:40 -07003349 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003350static void update_cpu_load(struct rq *this_rq)
Ingo Molnar48f24c42006-07-03 00:25:40 -07003351{
Dmitry Adamushko495eca42007-10-15 17:00:06 +02003352 unsigned long this_load = this_rq->load.weight;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003353 unsigned long curr_jiffies = jiffies;
3354 unsigned long pending_updates;
Ingo Molnardd41f592007-07-09 18:51:59 +02003355 int i, scale;
3356
3357 this_rq->nr_load_updates++;
Ingo Molnardd41f592007-07-09 18:51:59 +02003358
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003359 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
3360 if (curr_jiffies == this_rq->last_load_update_tick)
3361 return;
3362
3363 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3364 this_rq->last_load_update_tick = curr_jiffies;
3365
Ingo Molnardd41f592007-07-09 18:51:59 +02003366 /* Update our load: */
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003367 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3368 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
Ingo Molnardd41f592007-07-09 18:51:59 +02003369 unsigned long old_load, new_load;
3370
3371 /* scale is effectively 1 << i now, and >> i divides by scale */
3372
3373 old_load = this_rq->cpu_load[i];
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003374 old_load = decay_load_missed(old_load, pending_updates - 1, i);
Ingo Molnardd41f592007-07-09 18:51:59 +02003375 new_load = this_load;
Ingo Molnara25707f2007-10-15 17:00:03 +02003376 /*
3377 * Round up the averaging division if load is increasing. This
3378 * prevents us from getting stuck on 9 if the load is 10, for
3379 * example.
3380 */
3381 if (new_load > old_load)
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003382 new_load += scale - 1;
3383
3384 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
Ingo Molnardd41f592007-07-09 18:51:59 +02003385 }
Suresh Siddhada2b71e2010-08-23 13:42:51 -07003386
3387 sched_avg_update(this_rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003388}
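/*
 * Worked example of the per-index smoothing above: for i = 2
 * (scale = 4), cpu_load[2] = (3 * old + new) / 4, so a jump of the
 * instantaneous load from 0 to 1024 moves cpu_load[2] through
 * 256, 448, 592, ... on successive ticks (the scale - 1 round-up is
 * included), while cpu_load[0] follows to 1024 immediately.
 */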
3389
3390static void update_cpu_load_active(struct rq *this_rq)
3391{
3392 update_cpu_load(this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003393
Peter Zijlstra74f51872010-04-22 21:50:19 +02003394 calc_load_account_active(this_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07003395}
3396
Ingo Molnardd41f592007-07-09 18:51:59 +02003397#ifdef CONFIG_SMP
3398
Ingo Molnar48f24c42006-07-03 00:25:40 -07003399/*
Peter Zijlstra38022902009-12-16 18:04:37 +01003400 * sched_exec - execve() is a valuable balancing opportunity, because at
3401 * this point the task has the smallest effective memory and cache footprint.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402 */
Peter Zijlstra38022902009-12-16 18:04:37 +01003403void sched_exec(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404{
Peter Zijlstra38022902009-12-16 18:04:37 +01003405 struct task_struct *p = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07003407 struct rq *rq;
Peter Zijlstra0017d732010-03-24 18:34:10 +01003408 int dest_cpu;
Peter Zijlstra38022902009-12-16 18:04:37 +01003409
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410 rq = task_rq_lock(p, &flags);
Peter Zijlstra0017d732010-03-24 18:34:10 +01003411 dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
3412 if (dest_cpu == smp_processor_id())
3413 goto unlock;
Peter Zijlstra38022902009-12-16 18:04:37 +01003414
3415 /*
3416 * select_task_rq() can race against ->cpus_allowed
3417 */
Oleg Nesterov30da6882010-03-15 10:10:19 +01003418 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
Nikanth Karthikesanb7a2b392010-11-26 12:37:09 +05303419 likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
Tejun Heo969c7922010-05-06 18:49:21 +02003420 struct migration_arg arg = { p, dest_cpu };
Ingo Molnar36c8b582006-07-03 00:25:41 -07003421
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422 task_rq_unlock(rq, &flags);
Tejun Heo969c7922010-05-06 18:49:21 +02003423 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 return;
3425 }
Peter Zijlstra0017d732010-03-24 18:34:10 +01003426unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427 task_rq_unlock(rq, &flags);
3428}
3429
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430#endif
3431
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432DEFINE_PER_CPU(struct kernel_stat, kstat);
3433
3434EXPORT_PER_CPU_SYMBOL(kstat);
3435
3436/*
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003437 * Return any ns on the sched_clock that have not yet been accounted in
Frank Mayharf06febc2008-09-12 09:54:39 -07003438 * @p in case that task is currently running.
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003439 *
3440 * Called with task_rq_lock() held on @rq.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441 */
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003442static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3443{
3444 u64 ns = 0;
3445
3446 if (task_current(rq, p)) {
3447 update_rq_clock(rq);
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07003448 ns = rq->clock_task - p->se.exec_start;
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003449 if ((s64)ns < 0)
3450 ns = 0;
3451 }
3452
3453 return ns;
3454}
3455
Frank Mayharbb34d922008-09-12 09:54:39 -07003456unsigned long long task_delta_exec(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458 unsigned long flags;
Ingo Molnar41b86e92007-07-09 18:51:58 +02003459 struct rq *rq;
Frank Mayharbb34d922008-09-12 09:54:39 -07003460 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003461
Ingo Molnar41b86e92007-07-09 18:51:58 +02003462 rq = task_rq_lock(p, &flags);
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003463 ns = do_task_delta_exec(p, rq);
3464 task_rq_unlock(rq, &flags);
Ingo Molnar15084872008-09-30 08:28:17 +02003465
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003466 return ns;
3467}
Frank Mayharf06febc2008-09-12 09:54:39 -07003468
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003469/*
3470 * Return accounted runtime for the task.
3471 * In case the task is currently running, return the runtime plus current's
3472 * pending runtime that has not been accounted yet.
3473 */
3474unsigned long long task_sched_runtime(struct task_struct *p)
3475{
3476 unsigned long flags;
3477 struct rq *rq;
3478 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003479
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003480 rq = task_rq_lock(p, &flags);
3481 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
3482 task_rq_unlock(rq, &flags);
3483
3484 return ns;
3485}
3486
3487/*
3488 * Return sum_exec_runtime for the thread group.
3489 * In case the task is currently running, return the sum plus current's
3490 * pending runtime that has not been accounted yet.
3491 *
3492 * Note that the thread group might have other running tasks as well,
3493 * so the return value does not include pending runtime that other
3494 * running tasks might have.
3495 */
3496unsigned long long thread_group_sched_runtime(struct task_struct *p)
3497{
3498 struct task_cputime totals;
3499 unsigned long flags;
3500 struct rq *rq;
3501 u64 ns;
3502
3503 rq = task_rq_lock(p, &flags);
3504 thread_group_cputime(p, &totals);
3505 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506 task_rq_unlock(rq, &flags);
3507
3508 return ns;
3509}
3510
3511/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512 * Account user cpu time to a process.
3513 * @p: the process that the cpu time gets accounted to
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514 * @cputime: the cpu time spent in user space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003515 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003517void account_user_time(struct task_struct *p, cputime_t cputime,
3518 cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519{
3520 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3521 cputime64_t tmp;
3522
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003523 /* Add user time to process. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003525 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003526 account_group_user_time(p, cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527
3528 /* Add user time to cpustat. */
3529 tmp = cputime_to_cputime64(cputime);
3530 if (TASK_NICE(p) > 0)
3531 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3532 else
3533 cpustat->user = cputime64_add(cpustat->user, tmp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05303534
3535 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
Jonathan Lim49b5cf32008-07-25 01:48:40 -07003536 /* Account for user time used */
3537 acct_update_integrals(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538}
3539
3540/*
Laurent Vivier94886b82007-10-15 17:00:19 +02003541 * Account guest cpu time to a process.
3542 * @p: the process that the cpu time gets accounted to
3543 * @cputime: the cpu time spent in virtual machine since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003544 * @cputime_scaled: cputime scaled by cpu frequency
Laurent Vivier94886b82007-10-15 17:00:19 +02003545 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003546static void account_guest_time(struct task_struct *p, cputime_t cputime,
3547 cputime_t cputime_scaled)
Laurent Vivier94886b82007-10-15 17:00:19 +02003548{
3549 cputime64_t tmp;
3550 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3551
3552 tmp = cputime_to_cputime64(cputime);
3553
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003554 /* Add guest time to process. */
Laurent Vivier94886b82007-10-15 17:00:19 +02003555 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003556 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003557 account_group_user_time(p, cputime);
Laurent Vivier94886b82007-10-15 17:00:19 +02003558 p->gtime = cputime_add(p->gtime, cputime);
3559
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003560 /* Add guest time to cpustat. */
Ryota Ozakice0e7b22009-10-24 01:20:10 +09003561 if (TASK_NICE(p) > 0) {
3562 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3563 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3564 } else {
3565 cpustat->user = cputime64_add(cpustat->user, tmp);
3566 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3567 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003568}
3569
3570/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003571 * Account system cpu time to a process.
3572 * @p: the process that the cpu time gets accounted to
3573 * @hardirq_offset: the offset to subtract from hardirq_count()
3574 * @cputime: the cpu time spent in kernel space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003575 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576 */
3577void account_system_time(struct task_struct *p, int hardirq_offset,
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003578 cputime_t cputime, cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579{
3580 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003581 cputime64_t tmp;
3582
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003583 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003584 account_guest_time(p, cputime, cputime_scaled);
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003585 return;
3586 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003587
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003588 /* Add system time to process. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589 p->stime = cputime_add(p->stime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003590 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003591 account_group_system_time(p, cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592
3593 /* Add system time to cpustat. */
3594 tmp = cputime_to_cputime64(cputime);
3595 if (hardirq_count() - hardirq_offset)
3596 cpustat->irq = cputime64_add(cpustat->irq, tmp);
Venkatesh Pallipadi75e10562010-10-04 17:03:16 -07003597 else if (in_serving_softirq())
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599 else
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003600 cpustat->system = cputime64_add(cpustat->system, tmp);
3601
Bharata B Raoef12fef2009-03-31 10:02:22 +05303602 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3603
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604 /* Account for system time used */
3605 acct_update_integrals(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606}
3607
3608/*
3609 * Account for involuntary wait time.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610 * @cputime: the cpu time spent in involuntary wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003612void account_steal_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003615 cputime64_t cputime64 = cputime_to_cputime64(cputime);
3616
3617 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618}
3619
Christoph Lameter7835b982006-12-10 02:20:22 -08003620/*
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003621 * Account for idle time.
3622 * @cputime: the cpu time spent in idle wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003624void account_idle_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625{
3626 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003627 cputime64_t cputime64 = cputime_to_cputime64(cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628 struct rq *rq = this_rq();
3629
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003630 if (atomic_read(&rq->nr_iowait) > 0)
3631 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3632 else
3633 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
Christoph Lameter7835b982006-12-10 02:20:22 -08003634}
3635
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003636#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3637
3638/*
3639 * Account a single tick of cpu time.
3640 * @p: the process that the cpu time gets accounted to
3641 * @user_tick: indicates if the tick is a user or a system tick
3642 */
3643void account_process_tick(struct task_struct *p, int user_tick)
3644{
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003645 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003646 struct rq *rq = this_rq();
3647
3648 if (user_tick)
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003649 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
Eric Dumazetf5f293a2009-04-29 14:44:49 +02003650 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003651 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003652 one_jiffy_scaled);
3653 else
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003654 account_idle_time(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003655}
3656
3657/*
3658 * Account multiple ticks of steal time.
3659 * @ticks: number of stolen ticks
3661 */
3662void account_steal_ticks(unsigned long ticks)
3663{
3664 account_steal_time(jiffies_to_cputime(ticks));
3665}
3666
3667/*
3668 * Account multiple ticks of idle time.
3669 * @ticks: number of idle ticks
3670 */
3671void account_idle_ticks(unsigned long ticks)
3672{
3673 account_idle_time(jiffies_to_cputime(ticks));
3674}
3675
3676#endif
3677
Christoph Lameter7835b982006-12-10 02:20:22 -08003678/*
Balbir Singh49048622008-09-05 18:12:23 +02003679 * Use precise platform statistics if available:
3680 */
3681#ifdef CONFIG_VIRT_CPU_ACCOUNTING
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003682void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003683{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003684 *ut = p->utime;
3685 *st = p->stime;
Balbir Singh49048622008-09-05 18:12:23 +02003686}
3687
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003688void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003689{
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003690 struct task_cputime cputime;
3691
3692 thread_group_cputime(p, &cputime);
3693
3694 *ut = cputime.utime;
3695 *st = cputime.stime;
Balbir Singh49048622008-09-05 18:12:23 +02003696}
3697#else
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003698
3699#ifndef nsecs_to_cputime
Hidetoshi Setob7b20df92009-11-26 14:49:27 +09003700# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003701#endif
3702
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003703void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003704{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003705 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
Balbir Singh49048622008-09-05 18:12:23 +02003706
3707 /*
3708 * Use CFS's precise accounting:
3709 */
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003710 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
Balbir Singh49048622008-09-05 18:12:23 +02003711
3712 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003713 u64 temp = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02003714
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003715 temp *= utime;
Balbir Singh49048622008-09-05 18:12:23 +02003716 do_div(temp, total);
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003717 utime = (cputime_t)temp;
3718 } else
3719 utime = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02003720
3721 /*
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003722 * Compare with previous values, to keep monotonicity:
Balbir Singh49048622008-09-05 18:12:23 +02003723 */
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003724 p->prev_utime = max(p->prev_utime, utime);
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003725 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
Balbir Singh49048622008-09-05 18:12:23 +02003726
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003727 *ut = p->prev_utime;
3728 *st = p->prev_stime;
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003729}
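/*
 * Worked example of the rescaling above (hypothetical numbers): with
 * tick-sampled utime = 30 and stime = 90 but a precise
 * sum_exec_runtime worth 100 ticks, total = 120 and
 *
 *	utime = 100 * 30 / 120 = 25;	stime = 100 - 25 = 75;
 *
 * i.e. the precise runtime is split in the sampled user/system ratio,
 * and the max() against prev_utime/prev_stime keeps successive reads
 * monotonic even if the ratio shifts between samples.
 */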
Balbir Singh49048622008-09-05 18:12:23 +02003730
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003731/*
3732 * Must be called with siglock held.
3733 */
3734void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3735{
3736 struct signal_struct *sig = p->signal;
3737 struct task_cputime cputime;
3738 cputime_t rtime, utime, total;
3739
3740 thread_group_cputime(p, &cputime);
3741
3742 total = cputime_add(cputime.utime, cputime.stime);
3743 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3744
3745 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003746 u64 temp = rtime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003747
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003748 temp *= cputime.utime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003749 do_div(temp, total);
3750 utime = (cputime_t)temp;
3751 } else
3752 utime = rtime;
3753
3754 sig->prev_utime = max(sig->prev_utime, utime);
3755 sig->prev_stime = max(sig->prev_stime,
3756 cputime_sub(rtime, sig->prev_utime));
3757
3758 *ut = sig->prev_utime;
3759 *st = sig->prev_stime;
Balbir Singh49048622008-09-05 18:12:23 +02003760}
3761#endif
3762
Balbir Singh49048622008-09-05 18:12:23 +02003763/*
Christoph Lameter7835b982006-12-10 02:20:22 -08003764 * This function gets called by the timer code, with HZ frequency.
3765 * We call it with interrupts disabled.
3766 *
3767 * It also gets called by the fork code, when changing the parent's
3768 * timeslices.
3769 */
3770void scheduler_tick(void)
3771{
Christoph Lameter7835b982006-12-10 02:20:22 -08003772 int cpu = smp_processor_id();
3773 struct rq *rq = cpu_rq(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02003774 struct task_struct *curr = rq->curr;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02003775
3776 sched_clock_tick();
Christoph Lameter7835b982006-12-10 02:20:22 -08003777
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003778 raw_spin_lock(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02003779 update_rq_clock(rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003780 update_cpu_load_active(rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01003781 curr->sched_class->task_tick(rq, curr, 0);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003782 raw_spin_unlock(&rq->lock);
Ingo Molnardd41f592007-07-09 18:51:59 +02003783
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02003784 perf_event_task_tick();
Peter Zijlstrae220d2d2009-05-23 18:28:55 +02003785
Christoph Lametere418e1c2006-12-10 02:20:23 -08003786#ifdef CONFIG_SMP
Ingo Molnardd41f592007-07-09 18:51:59 +02003787 rq->idle_at_tick = idle_cpu(cpu);
3788 trigger_load_balance(rq, cpu);
Christoph Lametere418e1c2006-12-10 02:20:23 -08003789#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790}
3791
Lai Jiangshan132380a2009-04-02 14:18:25 +08003792notrace unsigned long get_parent_ip(unsigned long addr)
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003793{
3794 if (in_lock_functions(addr)) {
3795 addr = CALLER_ADDR2;
3796 if (in_lock_functions(addr))
3797 addr = CALLER_ADDR3;
3798 }
3799 return addr;
3800}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801
Steven Rostedt7e49fcc2009-01-22 19:01:40 -05003802#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3803 defined(CONFIG_PREEMPT_TRACER))
3804
Srinivasa Ds43627582008-02-23 15:24:04 -08003805void __kprobes add_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003807#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003808 /*
3809 * Underflow?
3810 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003811 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3812 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003813#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 preempt_count() += val;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003815#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816 /*
3817 * Spinlock count overflowing soon?
3818 */
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08003819 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3820 PREEMPT_MASK - 10);
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003821#endif
3822 if (preempt_count() == val)
3823 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824}
3825EXPORT_SYMBOL(add_preempt_count);
3826
Srinivasa Ds43627582008-02-23 15:24:04 -08003827void __kprobes sub_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003829#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830 /*
3831 * Underflow?
3832 */
Ingo Molnar01e3eb82009-01-12 13:00:50 +01003833 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003834 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835 /*
3836 * Is the spinlock portion underflowing?
3837 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003838 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3839 !(preempt_count() & PREEMPT_MASK)))
3840 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003841#endif
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003842
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003843 if (preempt_count() == val)
3844 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845 preempt_count() -= val;
3846}
3847EXPORT_SYMBOL(sub_preempt_count);
3848
3849#endif
3850
3851/*
Ingo Molnardd41f592007-07-09 18:51:59 +02003852 * Print scheduling while atomic bug:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003853 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003854static noinline void __schedule_bug(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855{
Satyam Sharma838225b2007-10-24 18:23:50 +02003856 struct pt_regs *regs = get_irq_regs();
3857
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01003858 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3859 prev->comm, prev->pid, preempt_count());
Satyam Sharma838225b2007-10-24 18:23:50 +02003860
Ingo Molnardd41f592007-07-09 18:51:59 +02003861 debug_show_held_locks(prev);
Arjan van de Vene21f5b12008-05-23 09:05:58 -07003862 print_modules();
Ingo Molnardd41f592007-07-09 18:51:59 +02003863 if (irqs_disabled())
3864 print_irqtrace_events(prev);
Satyam Sharma838225b2007-10-24 18:23:50 +02003865
3866 if (regs)
3867 show_regs(regs);
3868 else
3869 dump_stack();
Ingo Molnardd41f592007-07-09 18:51:59 +02003870}
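/*
 * An illustration of the bug class reported above: sleeping with
 * preemption disabled. The demo_lock/demo_mutex names are hypothetical;
 * spin_lock() implies preempt_disable(), and mutex_lock() may call
 * schedule(), so schedule_debug() below fires the warning.
 */
static DEFINE_SPINLOCK(demo_lock);
static DEFINE_MUTEX(demo_mutex);

static void demo_atomic_sleep_bug(void)
{
	spin_lock(&demo_lock);		/* preempt_count() is now non-zero */
	mutex_lock(&demo_mutex);	/* may sleep: "scheduling while atomic" */
	mutex_unlock(&demo_mutex);
	spin_unlock(&demo_lock);
}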
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871
Ingo Molnardd41f592007-07-09 18:51:59 +02003872/*
3873 * Various schedule()-time debugging checks and statistics:
3874 */
3875static inline void schedule_debug(struct task_struct *prev)
3876{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877 /*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01003878 * Test if we are atomic. Since do_exit() needs to call into
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879 * schedule() atomically, we ignore that path for now.
3880 * Otherwise, whine if we are scheduling when we should not be.
3881 */
Roel Kluin3f33a7c2008-05-13 23:44:11 +02003882 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
Ingo Molnardd41f592007-07-09 18:51:59 +02003883 __schedule_bug(prev);
3884
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3886
Ingo Molnar2d723762007-10-15 17:00:12 +02003887 schedstat_inc(this_rq(), sched_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02003888#ifdef CONFIG_SCHEDSTATS
3889 if (unlikely(prev->lock_depth >= 0)) {
Ingo Molnar2d723762007-10-15 17:00:12 +02003890 schedstat_inc(this_rq(), bkl_count);
3891 schedstat_inc(prev, sched_info.bkl_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02003892 }
3893#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02003894}
3895
Peter Zijlstra6cecd082009-11-30 13:00:37 +01003896static void put_prev_task(struct rq *rq, struct task_struct *prev)
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01003897{
Mike Galbraitha64692a2010-03-11 17:16:20 +01003898 if (prev->se.on_rq)
3899 update_rq_clock(rq);
Peter Zijlstra6cecd082009-11-30 13:00:37 +01003900 prev->sched_class->put_prev_task(rq, prev);
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01003901}
3902
Ingo Molnardd41f592007-07-09 18:51:59 +02003903/*
3904 * Pick up the highest-prio task:
3905 */
3906static inline struct task_struct *
Wang Chenb67802e2009-03-02 13:55:26 +08003907pick_next_task(struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02003908{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02003909 const struct sched_class *class;
Ingo Molnardd41f592007-07-09 18:51:59 +02003910 struct task_struct *p;
3911
3912 /*
3913 * Optimization: we know that if all tasks are in
3914 * the fair class we can call that function directly:
3915 */
3916 if (likely(rq->nr_running == rq->cfs.nr_running)) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02003917 p = fair_sched_class.pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02003918 if (likely(p))
3919 return p;
3920 }
3921
Peter Zijlstra34f971f2010-09-22 13:53:15 +02003922 for_each_class(class) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02003923 p = class->pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02003924 if (p)
3925 return p;
Ingo Molnardd41f592007-07-09 18:51:59 +02003926 }
Peter Zijlstra34f971f2010-09-22 13:53:15 +02003927
3928 BUG(); /* the idle class will always have a runnable task */
Ingo Molnardd41f592007-07-09 18:51:59 +02003929}
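/*
 * For reference, the class walk order assumed by pick_next_task(); in
 * this kernel the for_each_class() helper (defined earlier in this
 * file) follows the ->next links down from sched_class_highest:
 *
 *	stop_sched_class -> rt_sched_class -> fair_sched_class
 *			 -> idle_sched_class -> NULL
 *
 *	#define sched_class_highest	(&stop_sched_class)
 *	#define for_each_class(class) \
 *		for (class = sched_class_highest; class; class = class->next)
 */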
3930
3931/*
3932 * schedule() is the main scheduler function.
3933 */
Peter Zijlstraff743342009-03-13 12:21:26 +01003934asmlinkage void __sched schedule(void)
Ingo Molnardd41f592007-07-09 18:51:59 +02003935{
3936 struct task_struct *prev, *next;
Harvey Harrison67ca7bd2008-02-15 09:56:36 -08003937 unsigned long *switch_count;
Ingo Molnardd41f592007-07-09 18:51:59 +02003938 struct rq *rq;
Peter Zijlstra31656512008-07-18 18:01:23 +02003939 int cpu;
Ingo Molnardd41f592007-07-09 18:51:59 +02003940
Peter Zijlstraff743342009-03-13 12:21:26 +01003941need_resched:
3942 preempt_disable();
Ingo Molnardd41f592007-07-09 18:51:59 +02003943 cpu = smp_processor_id();
3944 rq = cpu_rq(cpu);
Paul E. McKenney25502a62010-04-01 17:37:01 -07003945 rcu_note_context_switch(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02003946 prev = rq->curr;
Ingo Molnardd41f592007-07-09 18:51:59 +02003947
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948 release_kernel_lock(prev);
3949need_resched_nonpreemptible:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950
Ingo Molnardd41f592007-07-09 18:51:59 +02003951 schedule_debug(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952
Peter Zijlstra31656512008-07-18 18:01:23 +02003953 if (sched_feat(HRTICK))
Mike Galbraithf333fdc2008-05-12 21:20:55 +02003954 hrtick_clear(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003955
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003956 raw_spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957
Oleg Nesterov246d86b2010-05-19 14:57:11 +02003958 switch_count = &prev->nivcsw;
Ingo Molnardd41f592007-07-09 18:51:59 +02003959 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
Tejun Heo21aa9af2010-06-08 21:40:37 +02003960 if (unlikely(signal_pending_state(prev->state, prev))) {
Ingo Molnardd41f592007-07-09 18:51:59 +02003961 prev->state = TASK_RUNNING;
Tejun Heo21aa9af2010-06-08 21:40:37 +02003962 } else {
3963 /*
3964 * If a worker is going to sleep, notify and
3965 * ask workqueue whether it wants to wake up a
3966 * task to maintain concurrency. If so, wake
3967 * up the task.
3968 */
3969 if (prev->flags & PF_WQ_WORKER) {
3970 struct task_struct *to_wakeup;
3971
3972 to_wakeup = wq_worker_sleeping(prev, cpu);
3973 if (to_wakeup)
3974 try_to_wake_up_local(to_wakeup);
3975 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003976 deactivate_task(rq, prev, DEQUEUE_SLEEP);
Tejun Heo21aa9af2010-06-08 21:40:37 +02003977 }
Ingo Molnardd41f592007-07-09 18:51:59 +02003978 switch_count = &prev->nvcsw;
3979 }
3980
Gregory Haskins3f029d32009-07-29 11:08:47 -04003981 pre_schedule(rq, prev);
Steven Rostedtf65eda42008-01-25 21:08:07 +01003982
Ingo Molnardd41f592007-07-09 18:51:59 +02003983 if (unlikely(!rq->nr_running))
3984 idle_balance(cpu, rq);
3985
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01003986 put_prev_task(rq, prev);
Wang Chenb67802e2009-03-02 13:55:26 +08003987 next = pick_next_task(rq);
Mike Galbraithf26f9af2010-12-08 11:05:42 +01003988 clear_tsk_need_resched(prev);
3989 rq->skip_clock_update = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990
Linus Torvalds1da177e2005-04-16 15:20:36 -07003991 if (likely(prev != next)) {
David Simner673a90a2008-04-29 10:08:59 +01003992 sched_info_switch(prev, next);
Peter Zijlstra49f47432009-12-27 11:51:52 +01003993 perf_event_task_sched_out(prev, next);
David Simner673a90a2008-04-29 10:08:59 +01003994
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995 rq->nr_switches++;
3996 rq->curr = next;
3997 ++*switch_count;
3998
Ingo Molnardd41f592007-07-09 18:51:59 +02003999 context_switch(rq, prev, next); /* unlocks the rq */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004000 /*
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004001 * The context switch has flipped the stack from under us
4002 * and restored the local variables which were saved when
4003 * this task called schedule() in the past. prev == current
4004 * is still correct, but it can be moved to another cpu/rq.
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004005 */
4006 cpu = smp_processor_id();
4007 rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004009 raw_spin_unlock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010
Gregory Haskins3f029d32009-07-29 11:08:47 -04004011 post_schedule(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004012
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004013 if (unlikely(reacquire_kernel_lock(prev)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014 goto need_resched_nonpreemptible;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004015
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016 preempt_enable_no_resched();
Peter Zijlstraff743342009-03-13 12:21:26 +01004017 if (need_resched())
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018 goto need_resched;
4019}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004020EXPORT_SYMBOL(schedule);
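/*
 * A minimal sketch of the canonical way code reaches schedule()
 * voluntarily: publish a sleeping state, re-check the condition, then
 * switch away. demo_wq/demo_cond are hypothetical; the helpers come
 * from <linux/wait.h>.
 */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_cond;

static void demo_wait_for_cond(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&demo_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (demo_cond)
			break;
		schedule();		/* the function above */
	}
	finish_wait(&demo_wq, &wait);
}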
4021
Frederic Weisbeckerc08f7822009-12-02 20:49:17 +01004022#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004023/*
4024 * Look out! "owner" is an entirely speculative pointer
4025 * access and not reliable.
4026 */
4027int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
4028{
4029 unsigned int cpu;
4030 struct rq *rq;
4031
4032 if (!sched_feat(OWNER_SPIN))
4033 return 0;
4034
4035#ifdef CONFIG_DEBUG_PAGEALLOC
4036 /*
4037 * Need to access the cpu field knowing that
4038 * DEBUG_PAGEALLOC could have unmapped it if
4039 * the mutex owner just released it and exited.
4040 */
4041 if (probe_kernel_address(&owner->cpu, cpu))
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004042 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004043#else
4044 cpu = owner->cpu;
4045#endif
4046
4047 /*
4048 * Even if the access succeeded (likely case),
4049 * the cpu field may no longer be valid.
4050 */
4051 if (cpu >= nr_cpumask_bits)
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004052 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004053
4054 /*
4055 * We need to validate that we can do a
4056 * get_cpu() and that we have the percpu area.
4057 */
4058 if (!cpu_online(cpu))
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004059 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004060
4061 rq = cpu_rq(cpu);
4062
4063 for (;;) {
4064 /*
4065 * Owner changed, break to re-assess state.
4066 */
Tim Chen9d0f4dc2010-08-18 15:00:27 -07004067 if (lock->owner != owner) {
4068 /*
4069 * If the lock has switched to a different owner,
4070 * we likely have heavy contention. Return 0 to quit
4071 * optimistic spinning and not contend further:
4072 */
4073 if (lock->owner)
4074 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004075 break;
Tim Chen9d0f4dc2010-08-18 15:00:27 -07004076 }
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004077
4078 /*
4079 * Is that owner really running on that cpu?
4080 */
4081 if (task_thread_info(rq->curr) != owner || need_resched())
4082 return 0;
4083
Gerald Schaefer335d7af2010-11-22 15:47:36 +01004084 arch_mutex_cpu_relax();
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004085 }
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004086
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004087 return 1;
4088}
4089#endif
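/*
 * A simplified, non-verbatim sketch of how the kernel/mutex.c slowpath
 * uses mutex_spin_on_owner() when sched_feat(OWNER_SPIN) is on: spin
 * while the owner keeps running, fall back to sleeping once it is
 * scheduled out. mutex_optimistic_spin() is a hypothetical name for
 * this excerpt.
 */
static int mutex_optimistic_spin(struct mutex *lock)
{
	for (;;) {
		struct thread_info *owner;

		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			return 0;	/* owner scheduled out: go sleep */

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
			return 1;	/* acquired while spinning */

		if (need_resched())
			return 0;

		arch_mutex_cpu_relax();
	}
}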
4090
Linus Torvalds1da177e2005-04-16 15:20:36 -07004091#ifdef CONFIG_PREEMPT
4092/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004093 * this is the entry point to schedule() from in-kernel preemption
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004094 * off of preempt_enable. Kernel preemption off of the return-from-interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095 * path is handled by preempt_schedule_irq() below, which calls schedule() directly.
4096 */
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004097asmlinkage void __sched notrace preempt_schedule(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098{
4099 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004100
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101 /*
4102 * If there is a non-zero preempt_count or interrupts are disabled,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004103 * we do not want to preempt the current task. Just return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104 */
Nick Pigginbeed33a2006-10-11 01:21:52 -07004105 if (likely(ti->preempt_count || irqs_disabled()))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106 return;
4107
Andi Kleen3a5c3592007-10-15 17:00:14 +02004108 do {
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004109 add_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004110 schedule();
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004111 sub_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004112
4113 /*
4114 * Check again in case we missed a preemption opportunity
4115 * between schedule and now.
4116 */
4117 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004118 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004119}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120EXPORT_SYMBOL(preempt_schedule);
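/*
 * For reference, how a plain preempt_enable() reaches the function
 * above; a sketch of the <linux/preempt.h> wiring of this era, not the
 * verbatim header text:
 *
 *	#define preempt_check_resched() \
 *	do { \
 *		if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
 *			preempt_schedule(); \
 *	} while (0)
 *
 *	#define preempt_enable() \
 *	do { \
 *		preempt_enable_no_resched(); \
 *		barrier(); \
 *		preempt_check_resched(); \
 *	} while (0)
 */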
4121
4122/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004123 * this is the entry point to schedule() from kernel preemption
Linus Torvalds1da177e2005-04-16 15:20:36 -07004124 * off of irq context.
4125 * Note that this is called and returns with irqs disabled. This will
4126 * protect us against recursive calling from irq.
4127 */
4128asmlinkage void __sched preempt_schedule_irq(void)
4129{
4130 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004131
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004132 /* Catch callers which need to be fixed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133 BUG_ON(ti->preempt_count || !irqs_disabled());
4134
Andi Kleen3a5c3592007-10-15 17:00:14 +02004135 do {
4136 add_preempt_count(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004137 local_irq_enable();
4138 schedule();
4139 local_irq_disable();
Andi Kleen3a5c3592007-10-15 17:00:14 +02004140 sub_preempt_count(PREEMPT_ACTIVE);
4141
4142 /*
4143 * Check again in case we missed a preemption opportunity
4144 * between schedule and now.
4145 */
4146 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004147 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004148}
4149
4150#endif /* CONFIG_PREEMPT */
4151
Peter Zijlstra63859d42009-09-15 19:14:42 +02004152int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004153 void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154{
Peter Zijlstra63859d42009-09-15 19:14:42 +02004155 return try_to_wake_up(curr->private, mode, wake_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004156}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157EXPORT_SYMBOL(default_wake_function);
4158
4159/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004160 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4161 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162 * number) then we wake all the non-exclusive tasks and one exclusive task.
4163 *
4164 * There are circumstances in which we can try to wake a task which has already
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004165 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4167 */
Johannes Weiner78ddb082009-04-14 16:53:05 +02004168static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
Peter Zijlstra63859d42009-09-15 19:14:42 +02004169 int nr_exclusive, int wake_flags, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170{
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004171 wait_queue_t *curr, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004172
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004173 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
Ingo Molnar48f24c42006-07-03 00:25:40 -07004174 unsigned flags = curr->flags;
4175
Peter Zijlstra63859d42009-09-15 19:14:42 +02004176 if (curr->func(curr, mode, wake_flags, key) &&
Ingo Molnar48f24c42006-07-03 00:25:40 -07004177 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178 break;
4179 }
4180}
4181
4182/**
4183 * __wake_up - wake up threads blocked on a waitqueue.
4184 * @q: the waitqueue
4185 * @mode: which threads
4186 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Martin Waitz67be2dd2005-05-01 08:59:26 -07004187 * @key: is directly passed to the wakeup function
David Howells50fa6102009-04-28 15:01:38 +01004188 *
4189 * It may be assumed that this function implies a write memory barrier before
4190 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004191 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004192void __wake_up(wait_queue_head_t *q, unsigned int mode,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004193 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194{
4195 unsigned long flags;
4196
4197 spin_lock_irqsave(&q->lock, flags);
4198 __wake_up_common(q, mode, nr_exclusive, 0, key);
4199 spin_unlock_irqrestore(&q->lock, flags);
4200}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004201EXPORT_SYMBOL(__wake_up);
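/*
 * Typical pairing around __wake_up(): a producer sets the condition and
 * calls wake_up(), a consumer sleeps in wait_event_interruptible().
 * rx_wq/rx_ready are hypothetical names; wake_up(x) expands to
 * __wake_up(x, TASK_NORMAL, 1, NULL).
 */
static DECLARE_WAIT_QUEUE_HEAD(rx_wq);
static int rx_ready;

static int demo_consumer(void)
{
	/* sleeps until rx_ready != 0, or returns -ERESTARTSYS on a signal */
	return wait_event_interruptible(rx_wq, rx_ready);
}

static void demo_producer(void)
{
	rx_ready = 1;
	wake_up(&rx_wq);
}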
4202
4203/*
4204 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4205 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004206void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207{
4208 __wake_up_common(q, mode, 1, 0, NULL);
4209}
Michal Nazarewicz22c43c82010-05-05 12:53:11 +02004210EXPORT_SYMBOL_GPL(__wake_up_locked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004211
Davide Libenzi4ede8162009-03-31 15:24:20 -07004212void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4213{
4214 __wake_up_common(q, mode, 1, 0, key);
4215}
4216
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217/**
Davide Libenzi4ede8162009-03-31 15:24:20 -07004218 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219 * @q: the waitqueue
4220 * @mode: which threads
4221 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Davide Libenzi4ede8162009-03-31 15:24:20 -07004222 * @key: opaque value to be passed to wakeup targets
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223 *
4224 * The sync wakeup differs in that the waker knows that it will schedule
4225 * away soon, so while the target thread will be woken up, it will not
4226 * be migrated to another CPU - ie. the two threads are 'synchronized'
4227 * with each other. This can prevent needless bouncing between CPUs.
4228 *
4229 * On UP it can prevent extra preemption.
David Howells50fa6102009-04-28 15:01:38 +01004230 *
4231 * It may be assumed that this function implies a write memory barrier before
4232 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233 */
Davide Libenzi4ede8162009-03-31 15:24:20 -07004234void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4235 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236{
4237 unsigned long flags;
Peter Zijlstra7d478722009-09-14 19:55:44 +02004238 int wake_flags = WF_SYNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239
4240 if (unlikely(!q))
4241 return;
4242
4243 if (unlikely(!nr_exclusive))
Peter Zijlstra7d478722009-09-14 19:55:44 +02004244 wake_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245
4246 spin_lock_irqsave(&q->lock, flags);
Peter Zijlstra7d478722009-09-14 19:55:44 +02004247 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248 spin_unlock_irqrestore(&q->lock, flags);
4249}
Davide Libenzi4ede8162009-03-31 15:24:20 -07004250EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4251
4252/*
4253 * __wake_up_sync - see __wake_up_sync_key()
4254 */
4255void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4256{
4257 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
4258}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4260
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004261/**
4262 * complete: - signals a single thread waiting on this completion
4263 * @x: holds the state of this particular completion
4264 *
4265 * This will wake up a single thread waiting on this completion. Threads will be
4266 * awakened in the same order in which they were queued.
4267 *
4268 * See also complete_all(), wait_for_completion() and related routines.
David Howells50fa6102009-04-28 15:01:38 +01004269 *
4270 * It may be assumed that this function implies a write memory barrier before
4271 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004272 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004273void complete(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274{
4275 unsigned long flags;
4276
4277 spin_lock_irqsave(&x->wait.lock, flags);
4278 x->done++;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004279 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280 spin_unlock_irqrestore(&x->wait.lock, flags);
4281}
4282EXPORT_SYMBOL(complete);
4283
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004284/**
4285 * complete_all: - signals all threads waiting on this completion
4286 * @x: holds the state of this particular completion
4287 *
4288 * This will wake up all threads waiting on this particular completion event.
David Howells50fa6102009-04-28 15:01:38 +01004289 *
4290 * It may be assumed that this function implies a write memory barrier before
4291 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004292 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004293void complete_all(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004294{
4295 unsigned long flags;
4296
4297 spin_lock_irqsave(&x->wait.lock, flags);
4298 x->done += UINT_MAX/2;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004299 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300 spin_unlock_irqrestore(&x->wait.lock, flags);
4301}
4302EXPORT_SYMBOL(complete_all);
4303
Andi Kleen8cbbe862007-10-15 17:00:14 +02004304static inline long __sched
4305do_wait_for_common(struct completion *x, long timeout, int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307 if (!x->done) {
4308 DECLARE_WAITQUEUE(wait, current);
4309
Changli Gaoa93d2f12010-05-07 14:33:26 +08004310 __add_wait_queue_tail_exclusive(&x->wait, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 do {
Oleg Nesterov94d3d822008-08-20 16:54:41 -07004312 if (signal_pending_state(state, current)) {
Oleg Nesterovea71a542008-06-20 18:32:20 +04004313 timeout = -ERESTARTSYS;
4314 break;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004315 }
4316 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004317 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004318 timeout = schedule_timeout(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319 spin_lock_irq(&x->wait.lock);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004320 } while (!x->done && timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321 __remove_wait_queue(&x->wait, &wait);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004322 if (!x->done)
4323 return timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324 }
4325 x->done--;
Oleg Nesterovea71a542008-06-20 18:32:20 +04004326 return timeout ?: 1;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004327}
4328
4329static long __sched
4330wait_for_common(struct completion *x, long timeout, int state)
4331{
4332 might_sleep();
4333
4334 spin_lock_irq(&x->wait.lock);
4335 timeout = do_wait_for_common(x, timeout, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004337 return timeout;
4338}
4339
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004340/**
4341 * wait_for_completion: - waits for completion of a task
4342 * @x: holds the state of this particular completion
4343 *
4344 * This waits to be signaled for completion of a specific task. It is NOT
4345 * interruptible and there is no timeout.
4346 *
4347 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4348 * and interrupt capability. Also see complete().
4349 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004350void __sched wait_for_completion(struct completion *x)
Andi Kleen8cbbe862007-10-15 17:00:14 +02004351{
4352 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353}
4354EXPORT_SYMBOL(wait_for_completion);
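/*
 * A minimal completion handshake: one thread signals, another blocks
 * until the signal arrives. The demo_* names are hypothetical;
 * kthread_run() comes from <linux/kthread.h>.
 */
static DECLARE_COMPLETION(demo_done);

static int demo_worker(void *unused)
{
	/* ... perform the setup the waiter depends on ... */
	complete(&demo_done);		/* wake exactly one waiter */
	return 0;
}

static void demo_start_and_wait(void)
{
	kthread_run(demo_worker, NULL, "demo-worker");
	wait_for_completion(&demo_done);	/* uninterruptible wait */
}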
4355
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004356/**
4357 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4358 * @x: holds the state of this particular completion
4359 * @timeout: timeout value in jiffies
4360 *
4361 * This waits for either a completion of a specific task to be signaled or for a
4362 * specified timeout to expire. The timeout is in jiffies. It is not
4363 * interruptible.
4364 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004365unsigned long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4367{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004368 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369}
4370EXPORT_SYMBOL(wait_for_completion_timeout);
4371
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004372/**
4373 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4374 * @x: holds the state of this particular completion
4375 *
4376 * This waits for completion of a specific task to be signaled. It is
4377 * interruptible.
4378 */
Andi Kleen8cbbe862007-10-15 17:00:14 +02004379int __sched wait_for_completion_interruptible(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380{
Andi Kleen51e97992007-10-18 21:32:55 +02004381 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4382 if (t == -ERESTARTSYS)
4383 return t;
4384 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385}
4386EXPORT_SYMBOL(wait_for_completion_interruptible);
4387
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004388/**
4389 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4390 * @x: holds the state of this particular completion
4391 * @timeout: timeout value in jiffies
4392 *
4393 * This waits for either a completion of a specific task to be signaled or for a
4394 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4395 */
NeilBrown6bf41232011-01-05 12:50:16 +11004396long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004397wait_for_completion_interruptible_timeout(struct completion *x,
4398 unsigned long timeout)
4399{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004400 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004401}
4402EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
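/*
 * Decoding the return value of the interruptible timeout variant, with
 * a hypothetical caller: 0 means the timeout elapsed, a negative value
 * (-ERESTARTSYS) means a signal arrived, and a positive value is the
 * number of jiffies left when the completion fired.
 */
static int demo_wait_one_second(struct completion *done)
{
	long t = wait_for_completion_interruptible_timeout(done, HZ);

	if (t == 0)
		return -ETIMEDOUT;
	if (t < 0)
		return t;	/* -ERESTARTSYS: interrupted by a signal */
	return 0;		/* completed with t jiffies to spare */
}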
4403
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004404/**
4405 * wait_for_completion_killable: - waits for completion of a task (killable)
4406 * @x: holds the state of this particular completion
4407 *
4408 * This waits to be signaled for completion of a specific task. It can be
4409 * interrupted by a kill signal.
4410 */
Matthew Wilcox009e5772007-12-06 12:29:54 -05004411int __sched wait_for_completion_killable(struct completion *x)
4412{
4413 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4414 if (t == -ERESTARTSYS)
4415 return t;
4416 return 0;
4417}
4418EXPORT_SYMBOL(wait_for_completion_killable);
4419
Dave Chinnerbe4de352008-08-15 00:40:44 -07004420/**
Sage Weil0aa12fb2010-05-29 09:12:30 -07004421 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4422 * @x: holds the state of this particular completion
4423 * @timeout: timeout value in jiffies
4424 *
4425 * This waits for either a completion of a specific task to be
4426 * signaled or for a specified timeout to expire. It can be
4427 * interrupted by a kill signal. The timeout is in jiffies.
4428 */
NeilBrown6bf41232011-01-05 12:50:16 +11004429long __sched
Sage Weil0aa12fb2010-05-29 09:12:30 -07004430wait_for_completion_killable_timeout(struct completion *x,
4431 unsigned long timeout)
4432{
4433 return wait_for_common(x, timeout, TASK_KILLABLE);
4434}
4435EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4436
4437/**
Dave Chinnerbe4de352008-08-15 00:40:44 -07004438 * try_wait_for_completion - try to decrement a completion without blocking
4439 * @x: completion structure
4440 *
4441 * Returns: 0 if a decrement cannot be done without blocking
4442 * 1 if a decrement succeeded.
4443 *
4444 * If a completion is being used as a counting completion,
4445 * attempt to decrement the counter without blocking. This
4446 * enables us to avoid waiting if the resource the completion
4447 * is protecting is not available.
4448 */
4449bool try_wait_for_completion(struct completion *x)
4450{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004451 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004452 int ret = 1;
4453
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004454 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004455 if (!x->done)
4456 ret = 0;
4457 else
4458 x->done--;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004459 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004460 return ret;
4461}
4462EXPORT_SYMBOL(try_wait_for_completion);
4463
4464/**
4465 * completion_done - Test to see if a completion has any waiters
4466 * @x: completion structure
4467 *
4468 * Returns: 0 if there are waiters (wait_for_completion() in progress)
4469 * 1 if there are no waiters.
4470 *
4471 */
4472bool completion_done(struct completion *x)
4473{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004474 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004475 int ret = 1;
4476
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004477 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004478 if (!x->done)
4479 ret = 0;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004480 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004481 return ret;
4482}
4483EXPORT_SYMBOL(completion_done);
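/*
 * Non-blocking use of a counting completion, with hypothetical helper
 * names: try_wait_for_completion() consumes one count if available,
 * while completion_done() only peeks.
 */
static bool demo_try_consume(struct completion *x)
{
	return try_wait_for_completion(x);	/* true: count consumed */
}

static bool demo_would_block(struct completion *x)
{
	return !completion_done(x);	/* true: a wait would sleep */
}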
4484
Andi Kleen8cbbe862007-10-15 17:00:14 +02004485static long __sched
4486sleep_on_common(wait_queue_head_t *q, int state, long timeout)
Ingo Molnar0fec1712007-07-09 18:52:01 +02004487{
4488 unsigned long flags;
4489 wait_queue_t wait;
4490
4491 init_waitqueue_entry(&wait, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492
Andi Kleen8cbbe862007-10-15 17:00:14 +02004493 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494
Andi Kleen8cbbe862007-10-15 17:00:14 +02004495 spin_lock_irqsave(&q->lock, flags);
4496 __add_wait_queue(q, &wait);
4497 spin_unlock(&q->lock);
4498 timeout = schedule_timeout(timeout);
4499 spin_lock_irq(&q->lock);
4500 __remove_wait_queue(q, &wait);
4501 spin_unlock_irqrestore(&q->lock, flags);
4502
4503 return timeout;
4504}
4505
4506void __sched interruptible_sleep_on(wait_queue_head_t *q)
4507{
4508 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510EXPORT_SYMBOL(interruptible_sleep_on);
4511
Ingo Molnar0fec1712007-07-09 18:52:01 +02004512long __sched
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004513interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004515 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004516}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4518
Ingo Molnar0fec1712007-07-09 18:52:01 +02004519void __sched sleep_on(wait_queue_head_t *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004521 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004522}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523EXPORT_SYMBOL(sleep_on);
4524
Ingo Molnar0fec1712007-07-09 18:52:01 +02004525long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004527 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004529EXPORT_SYMBOL(sleep_on_timeout);
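/*
 * The sleep_on() family above is inherently racy: a wakeup can slip in
 * between the caller's condition test and the enqueue inside
 * sleep_on_common(). The usual conversion, sketched with hypothetical
 * ev_wq/ev_happened names:
 *
 *	if (!ev_happened)			(racy window here)
 *		interruptible_sleep_on(&ev_wq);
 *
 *	wait_event_interruptible(ev_wq, ev_happened);
 *						(race-free: the condition
 *						 is re-tested after queueing)
 */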
4530
Ingo Molnarb29739f2006-06-27 02:54:51 -07004531#ifdef CONFIG_RT_MUTEXES
4532
4533/*
4534 * rt_mutex_setprio - set the current priority of a task
4535 * @p: task
4536 * @prio: prio value (kernel-internal form)
4537 *
4538 * This function changes the 'effective' priority of a task. It does
4539 * not touch ->normal_prio like __setscheduler().
4540 *
4541 * Used by the rt_mutex code to implement priority inheritance logic.
4542 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004543void rt_mutex_setprio(struct task_struct *p, int prio)
Ingo Molnarb29739f2006-06-27 02:54:51 -07004544{
4545 unsigned long flags;
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004546 int oldprio, on_rq, running;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004547 struct rq *rq;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004548 const struct sched_class *prev_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004549
4550 BUG_ON(prio < 0 || prio > MAX_PRIO);
4551
4552 rq = task_rq_lock(p, &flags);
4553
Steven Rostedta8027072010-09-20 15:13:34 -04004554 trace_sched_pi_setprio(p, prio);
Andrew Mortond5f9f942007-05-08 20:27:06 -07004555 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004556 prev_class = p->sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02004557 on_rq = p->se.on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01004558 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004559 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004560 dequeue_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004561 if (running)
4562 p->sched_class->put_prev_task(rq, p);
Ingo Molnardd41f592007-07-09 18:51:59 +02004563
4564 if (rt_prio(prio))
4565 p->sched_class = &rt_sched_class;
4566 else
4567 p->sched_class = &fair_sched_class;
4568
Ingo Molnarb29739f2006-06-27 02:54:51 -07004569 p->prio = prio;
4570
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004571 if (running)
4572 p->sched_class->set_curr_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004573 if (on_rq) {
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004574 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01004575
4576 check_class_changed(rq, p, prev_class, oldprio, running);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004577 }
4578 task_rq_unlock(rq, &flags);
4579}
4580
4581#endif
4582
Ingo Molnar36c8b582006-07-03 00:25:41 -07004583void set_user_nice(struct task_struct *p, long nice)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584{
Ingo Molnardd41f592007-07-09 18:51:59 +02004585 int old_prio, delta, on_rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004587 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004588
4589 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4590 return;
4591 /*
4592 * We have to be careful, if called from sys_setpriority(),
4593 * the task might be in the middle of scheduling on another CPU.
4594 */
4595 rq = task_rq_lock(p, &flags);
4596 /*
4597 * The RT priorities are set via sched_setscheduler(), but we still
4598 * allow the 'normal' nice value to be set - but as expected
4599 * it won't have any effect on scheduling as long as the task is
Ingo Molnardd41f592007-07-09 18:51:59 +02004600 * SCHED_FIFO/SCHED_RR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601 */
Ingo Molnare05606d2007-07-09 18:51:59 +02004602 if (task_has_rt_policy(p)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603 p->static_prio = NICE_TO_PRIO(nice);
4604 goto out_unlock;
4605 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004606 on_rq = p->se.on_rq;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02004607 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004608 dequeue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610 p->static_prio = NICE_TO_PRIO(nice);
Peter Williams2dd73a42006-06-27 02:54:34 -07004611 set_load_weight(p);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004612 old_prio = p->prio;
4613 p->prio = effective_prio(p);
4614 delta = p->prio - old_prio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615
Ingo Molnardd41f592007-07-09 18:51:59 +02004616 if (on_rq) {
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004617 enqueue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004618 /*
Andrew Mortond5f9f942007-05-08 20:27:06 -07004619 * If the task increased its priority or is running and
4620 * lowered its priority, then reschedule its CPU:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004621 */
Andrew Mortond5f9f942007-05-08 20:27:06 -07004622 if (delta < 0 || (delta > 0 && task_running(rq, p)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623 resched_task(rq->curr);
4624 }
4625out_unlock:
4626 task_rq_unlock(rq, &flags);
4627}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004628EXPORT_SYMBOL(set_user_nice);
4629
Matt Mackalle43379f2005-05-01 08:59:00 -07004630/*
4631 * can_nice - check if a task can reduce its nice value
4632 * @p: task
4633 * @nice: nice value
4634 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004635int can_nice(const struct task_struct *p, const int nice)
Matt Mackalle43379f2005-05-01 08:59:00 -07004636{
Matt Mackall024f4742005-08-18 11:24:19 -07004637 /* convert nice value [19,-20] to rlimit style value [1,40] */
4638 int nice_rlim = 20 - nice;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004639
Jiri Slaby78d7d402010-03-05 13:42:54 -08004640 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
Matt Mackalle43379f2005-05-01 08:59:00 -07004641 capable(CAP_SYS_NICE));
4642}
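/*
 * Worked numbers for the conversions above, using this kernel's
 * NICE_TO_PRIO(nice) = MAX_RT_PRIO + nice + 20:
 *
 *	nice -20 -> static_prio 100, nice 0 -> 120, nice +19 -> 139
 *
 * and can_nice()'s rlimit mapping nice_rlim = 20 - nice:
 *
 *	nice +19 -> 1 (weakest), nice -20 -> 40 (strongest),
 *	so RLIMIT_NICE = 25 lets an unprivileged task reach nice -5.
 */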
4643
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644#ifdef __ARCH_WANT_SYS_NICE
4645
4646/*
4647 * sys_nice - change the priority of the current process.
4648 * @increment: priority increment
4649 *
4650 * sys_setpriority is a more generic, but much slower function that
4651 * does similar things.
4652 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004653SYSCALL_DEFINE1(nice, int, increment)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654{
Ingo Molnar48f24c42006-07-03 00:25:40 -07004655 long nice, retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656
4657 /*
4658 * Setpriority might change our priority at the same moment.
4659 * We don't have to worry. Conceptually one call occurs first
4660 * and we have a single winner.
4661 */
Matt Mackalle43379f2005-05-01 08:59:00 -07004662 if (increment < -40)
4663 increment = -40;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664 if (increment > 40)
4665 increment = 40;
4666
Américo Wang2b8f8362009-02-16 18:54:21 +08004667 nice = TASK_NICE(current) + increment;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004668 if (nice < -20)
4669 nice = -20;
4670 if (nice > 19)
4671 nice = 19;
4672
Matt Mackalle43379f2005-05-01 08:59:00 -07004673 if (increment < 0 && !can_nice(current, nice))
4674 return -EPERM;
4675
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676 retval = security_task_setnice(current, nice);
4677 if (retval)
4678 return retval;
4679
4680 set_user_nice(current, nice);
4681 return 0;
4682}
4683
4684#endif
4685
4686/**
4687 * task_prio - return the priority value of a given task.
4688 * @p: the task in question.
4689 *
4690 * This is the priority value as seen by users in /proc.
4691 * RT tasks are offset by -MAX_RT_PRIO (-100); normal tasks map
4692 * their nice value to 20 + nice, so the value goes from 0 to +39.
4693 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004694int task_prio(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004695{
4696 return p->prio - MAX_RT_PRIO;
4697}
4698
4699/**
4700 * task_nice - return the nice value of a given task.
4701 * @p: the task in question.
4702 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004703int task_nice(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004704{
4705 return TASK_NICE(p);
4706}
Pavel Roskin150d8be2008-03-05 16:56:37 -05004707EXPORT_SYMBOL(task_nice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708
4709/**
4710 * idle_cpu - is a given cpu idle currently?
4711 * @cpu: the processor in question.
4712 */
4713int idle_cpu(int cpu)
4714{
4715 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4716}
4717
Linus Torvalds1da177e2005-04-16 15:20:36 -07004718/**
4719 * idle_task - return the idle task for a given cpu.
4720 * @cpu: the processor in question.
4721 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004722struct task_struct *idle_task(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723{
4724 return cpu_rq(cpu)->idle;
4725}
4726
4727/**
4728 * find_process_by_pid - find a process with a matching PID value.
4729 * @pid: the pid in question.
4730 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02004731static struct task_struct *find_process_by_pid(pid_t pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004732{
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07004733 return pid ? find_task_by_vpid(pid) : current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004734}
4735
4736/* Actually do priority change: must hold rq lock. */
Ingo Molnardd41f592007-07-09 18:51:59 +02004737static void
4738__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739{
Ingo Molnardd41f592007-07-09 18:51:59 +02004740 BUG_ON(p->se.on_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07004741
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742 p->policy = policy;
4743 p->rt_priority = prio;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004744 p->normal_prio = normal_prio(p);
4745 /* we are holding p->pi_lock already */
4746 p->prio = rt_mutex_getprio(p);
Peter Zijlstraffd44db2009-11-10 20:12:01 +01004747 if (rt_prio(p->prio))
4748 p->sched_class = &rt_sched_class;
4749 else
4750 p->sched_class = &fair_sched_class;
Peter Williams2dd73a42006-06-27 02:54:34 -07004751 set_load_weight(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004752}
4753
David Howellsc69e8d92008-11-14 10:39:19 +11004754/*
4755 * check the target process has a UID that matches the current process's
4756 */
4757static bool check_same_owner(struct task_struct *p)
4758{
4759 const struct cred *cred = current_cred(), *pcred;
4760 bool match;
4761
4762 rcu_read_lock();
4763 pcred = __task_cred(p);
4764 match = (cred->euid == pcred->euid ||
4765 cred->euid == pcred->uid);
4766 rcu_read_unlock();
4767 return match;
4768}
4769
Rusty Russell961ccdd2008-06-23 13:55:38 +10004770static int __sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07004771 const struct sched_param *param, bool user)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004772{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004773 int retval, oldprio, oldpolicy = -1, on_rq, running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004774 unsigned long flags;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004775 const struct sched_class *prev_class;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004776 struct rq *rq;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004777 int reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004778
Steven Rostedt66e53932006-06-27 02:54:44 -07004779 /* may grab non-irq protected spin_locks */
4780 BUG_ON(in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004781recheck:
4782 /* double check policy once rq lock held */
Lennart Poetteringca94c442009-06-15 17:17:47 +02004783 if (policy < 0) {
4784 reset_on_fork = p->sched_reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004785 policy = oldpolicy = p->policy;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004786 } else {
4787 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
4788 policy &= ~SCHED_RESET_ON_FORK;
4789
4790 if (policy != SCHED_FIFO && policy != SCHED_RR &&
4791 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4792 policy != SCHED_IDLE)
4793 return -EINVAL;
4794 }
4795
Linus Torvalds1da177e2005-04-16 15:20:36 -07004796 /*
4797 * Valid priorities for SCHED_FIFO and SCHED_RR are
Ingo Molnardd41f592007-07-09 18:51:59 +02004798 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4799 * SCHED_BATCH and SCHED_IDLE is 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004800 */
4801 if (param->sched_priority < 0 ||
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004802 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
Steven Rostedtd46523e2005-07-25 16:28:39 -04004803 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804 return -EINVAL;
Ingo Molnare05606d2007-07-09 18:51:59 +02004805 if (rt_policy(policy) != (param->sched_priority != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004806 return -EINVAL;
4807
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004808 /*
4809 * Allow unprivileged RT tasks to decrease priority:
4810 */
Rusty Russell961ccdd2008-06-23 13:55:38 +10004811 if (user && !capable(CAP_SYS_NICE)) {
Ingo Molnare05606d2007-07-09 18:51:59 +02004812 if (rt_policy(policy)) {
Oleg Nesterova44702e2010-06-11 01:09:44 +02004813 unsigned long rlim_rtprio =
4814 task_rlimit(p, RLIMIT_RTPRIO);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004815
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004816 /* can't set/change the rt policy */
4817 if (policy != p->policy && !rlim_rtprio)
4818 return -EPERM;
4819
4820 /* can't increase priority */
4821 if (param->sched_priority > p->rt_priority &&
4822 param->sched_priority > rlim_rtprio)
4823 return -EPERM;
4824 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004825 /*
4826 * Like positive nice levels, dont allow tasks to
4827 * move out of SCHED_IDLE either:
4828 */
4829 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
4830 return -EPERM;
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004831
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004832 /* can't change other user's priorities */
David Howellsc69e8d92008-11-14 10:39:19 +11004833 if (!check_same_owner(p))
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004834 return -EPERM;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004835
4836 /* Normal users shall not reset the sched_reset_on_fork flag */
4837 if (p->sched_reset_on_fork && !reset_on_fork)
4838 return -EPERM;
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004839 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004840
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07004841 if (user) {
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09004842 retval = security_task_setscheduler(p);
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07004843 if (retval)
4844 return retval;
4845 }
4846
Linus Torvalds1da177e2005-04-16 15:20:36 -07004847 /*
Ingo Molnarb29739f2006-06-27 02:54:51 -07004848 * make sure no PI-waiters arrive (or leave) while we are
4849 * changing the priority of the task:
4850 */
Thomas Gleixner1d615482009-11-17 14:54:03 +01004851 raw_spin_lock_irqsave(&p->pi_lock, flags);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004852 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07004853 * To be able to change p->policy safely, the appropriate
4854 * runqueue lock must be held.
4855 */
Ingo Molnarb29739f2006-06-27 02:54:51 -07004856 rq = __task_rq_lock(p);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02004857
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004858 /*
4859 * Changing the policy of the stop threads is a very bad idea
4860 */
4861 if (p == rq->stop) {
4862 __task_rq_unlock(rq);
4863 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4864 return -EINVAL;
4865 }
4866
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02004867#ifdef CONFIG_RT_GROUP_SCHED
4868 if (user) {
4869 /*
4870 * Do not allow realtime tasks into groups that have no runtime
4871 * assigned.
4872 */
4873 if (rt_bandwidth_enabled() && rt_policy(policy) &&
Mike Galbraithf4493772011-01-13 04:54:50 +01004874 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4875 !task_group_is_autogroup(task_group(p))) {
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02004876 __task_rq_unlock(rq);
4877 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4878 return -EPERM;
4879 }
4880 }
4881#endif
4882
Linus Torvalds1da177e2005-04-16 15:20:36 -07004883 /* recheck policy now with rq lock held */
4884 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4885 policy = oldpolicy = -1;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004886 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01004887 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004888 goto recheck;
4889 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004890 on_rq = p->se.on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01004891 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004892 if (on_rq)
Ingo Molnar2e1cb742007-08-09 11:16:49 +02004893 deactivate_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004894 if (running)
4895 p->sched_class->put_prev_task(rq, p);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02004896
Lennart Poetteringca94c442009-06-15 17:17:47 +02004897 p->sched_reset_on_fork = reset_on_fork;
4898
Linus Torvalds1da177e2005-04-16 15:20:36 -07004899 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004900 prev_class = p->sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02004901 __setscheduler(rq, p, policy, param->sched_priority);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02004902
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004903 if (running)
4904 p->sched_class->set_curr_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004905 if (on_rq) {
4906 activate_task(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01004907
4908 check_class_changed(rq, p, prev_class, oldprio, running);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004909 }
Ingo Molnarb29739f2006-06-27 02:54:51 -07004910 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01004911 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004912
Thomas Gleixner95e02ca2006-06-27 02:55:02 -07004913 rt_mutex_adjust_pi(p);
4914
Linus Torvalds1da177e2005-04-16 15:20:36 -07004915 return 0;
4916}
Rusty Russell961ccdd2008-06-23 13:55:38 +10004917
4918/**
4919 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4920 * @p: the task in question.
4921 * @policy: new policy.
4922 * @param: structure containing the new RT priority.
4923 *
4924 * NOTE that the task may already be dead.
4925 */
4926int sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07004927 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10004928{
4929 return __sched_setscheduler(p, policy, param, true);
4930}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004931EXPORT_SYMBOL_GPL(sched_setscheduler);
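/*
 * Typical in-kernel caller of the checked variant above; MAX_RT_PRIO - 1
 * is the highest FIFO priority, and demo_make_fifo() is a hypothetical
 * helper name.
 */
static void demo_make_fifo(struct task_struct *p)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	if (sched_setscheduler(p, SCHED_FIFO, &param))
		printk(KERN_WARNING "demo: cannot make %s SCHED_FIFO\n",
		       p->comm);
}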
4932
Rusty Russell961ccdd2008-06-23 13:55:38 +10004933/**
4934 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4935 * @p: the task in question.
4936 * @policy: new policy.
4937 * @param: structure containing the new RT priority.
4938 *
4939 * Just like sched_setscheduler, only don't bother checking if the
4940 * current context has permission. For example, this is needed in
4941 * stop_machine(): we create temporary high priority worker threads,
4942 * but our caller might not have that capability.
4943 */
4944int sched_setscheduler_nocheck(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07004945 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10004946{
4947 return __sched_setscheduler(p, policy, param, false);
4948}
4949
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004950static int
4951do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004952{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953 struct sched_param lparam;
4954 struct task_struct *p;
Ingo Molnar36c8b582006-07-03 00:25:41 -07004955 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004956
4957 if (!param || pid < 0)
4958 return -EINVAL;
4959 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4960 return -EFAULT;
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004961
4962 rcu_read_lock();
4963 retval = -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964 p = find_process_by_pid(pid);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004965 if (p != NULL)
4966 retval = sched_setscheduler(p, policy, &lparam);
4967 rcu_read_unlock();
Ingo Molnar36c8b582006-07-03 00:25:41 -07004968
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969 return retval;
4970}
4971
4972/**
4973 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4974 * @pid: the pid in question.
4975 * @policy: new policy.
4976 * @param: structure containing the new RT priority.
4977 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004978SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4979 struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980{
Jason Baronc21761f2006-01-18 17:43:03 -08004981 /* negative values for policy are not valid */
4982 if (policy < 0)
4983 return -EINVAL;
4984
Linus Torvalds1da177e2005-04-16 15:20:36 -07004985 return do_sched_setscheduler(pid, policy, param);
4986}
4987
4988/**
4989 * sys_sched_setparam - set/change the RT priority of a thread
4990 * @pid: the pid in question.
4991 * @param: structure containing the new RT priority.
4992 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004993SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994{
4995 return do_sched_setscheduler(pid, -1, param);
4996}
4997
4998/**
4999 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5000 * @pid: the pid in question.
5001 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005002SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005004 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005005 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005006
5007 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005008 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009
5010 retval = -ESRCH;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005011 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005012 p = find_process_by_pid(pid);
5013 if (p) {
5014 retval = security_task_getscheduler(p);
5015 if (!retval)
Lennart Poetteringca94c442009-06-15 17:17:47 +02005016 retval = p->policy
5017 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005018 }
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005019 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005020 return retval;
5021}
5022
5023/**
Lennart Poetteringca94c442009-06-15 17:17:47 +02005024 * sys_sched_getparam - get the RT priority of a thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07005025 * @pid: the pid in question.
5026 * @param: structure containing the RT priority.
5027 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005028SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005029{
5030 struct sched_param lp;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005031 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005032 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033
5034 if (!param || pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005035 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005036
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005037 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005038 p = find_process_by_pid(pid);
5039 retval = -ESRCH;
5040 if (!p)
5041 goto out_unlock;
5042
5043 retval = security_task_getscheduler(p);
5044 if (retval)
5045 goto out_unlock;
5046
5047 lp.sched_priority = p->rt_priority;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005048 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005049
5050 /*
5051 * This one might sleep, we cannot do it with a spinlock held ...
5052 */
5053 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5054
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055 return retval;
5056
5057out_unlock:
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005058 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005059 return retval;
5060}
5061
Rusty Russell96f874e2008-11-25 02:35:14 +10305062long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305064 cpumask_var_t cpus_allowed, new_mask;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005065 struct task_struct *p;
5066 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005068 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005069 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005070
5071 p = find_process_by_pid(pid);
5072 if (!p) {
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005073 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005074 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075 return -ESRCH;
5076 }
5077
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005078 /* Prevent p going away */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005079 get_task_struct(p);
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005080 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305082 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5083 retval = -ENOMEM;
5084 goto out_put_task;
5085 }
5086 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5087 retval = -ENOMEM;
5088 goto out_free_cpus_allowed;
5089 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090 retval = -EPERM;
David Howellsc69e8d92008-11-14 10:39:19 +11005091 if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005092 goto out_unlock;
5093
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09005094 retval = security_task_setscheduler(p);
David Quigleye7834f82006-06-23 02:03:59 -07005095 if (retval)
5096 goto out_unlock;
5097
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305098 cpuset_cpus_allowed(p, cpus_allowed);
5099 cpumask_and(new_mask, in_mask, cpus_allowed);
Peter Zijlstra49246272010-10-17 21:46:10 +02005100again:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305101 retval = set_cpus_allowed_ptr(p, new_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005102
Paul Menage8707d8b2007-10-18 23:40:22 -07005103 if (!retval) {
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305104 cpuset_cpus_allowed(p, cpus_allowed);
5105 if (!cpumask_subset(new_mask, cpus_allowed)) {
Paul Menage8707d8b2007-10-18 23:40:22 -07005106 /*
5107 * We must have raced with a concurrent cpuset
5108 * update. Just reset the cpus_allowed to the
5109 * cpuset's cpus_allowed
5110 */
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305111 cpumask_copy(new_mask, cpus_allowed);
Paul Menage8707d8b2007-10-18 23:40:22 -07005112 goto again;
5113 }
5114 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005115out_unlock:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305116 free_cpumask_var(new_mask);
5117out_free_cpus_allowed:
5118 free_cpumask_var(cpus_allowed);
5119out_put_task:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120 put_task_struct(p);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005121 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005122 return retval;
5123}
5124
5125static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
Rusty Russell96f874e2008-11-25 02:35:14 +10305126 struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127{
Rusty Russell96f874e2008-11-25 02:35:14 +10305128 if (len < cpumask_size())
5129 cpumask_clear(new_mask);
5130 else if (len > cpumask_size())
5131 len = cpumask_size();
5132
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5134}
5135
5136/**
5137 * sys_sched_setaffinity - set the cpu affinity of a process
5138 * @pid: pid of the process
5139 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5140 * @user_mask_ptr: user-space pointer to the new cpu mask
5141 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005142SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5143 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005144{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305145 cpumask_var_t new_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146 int retval;
5147
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305148 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5149 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305151 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5152 if (retval == 0)
5153 retval = sched_setaffinity(pid, new_mask);
5154 free_cpumask_var(new_mask);
5155 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005156}
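/*
 * Illustrative user-space sketch (example only): pinning the calling
 * thread to CPU 0 through the glibc wrapper for this syscall.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int pin_to_cpu0(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	if (sched_setaffinity(0, sizeof(set), &set) == -1) {
		perror("sched_setaffinity");
		return -1;
	}
	return 0;
}
#endif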
5157
Rusty Russell96f874e2008-11-25 02:35:14 +10305158long sched_getaffinity(pid_t pid, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005159{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005160 struct task_struct *p;
Thomas Gleixner31605682009-12-08 20:24:16 +00005161 unsigned long flags;
5162 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005165 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005166 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167
5168 retval = -ESRCH;
5169 p = find_process_by_pid(pid);
5170 if (!p)
5171 goto out_unlock;
5172
David Quigleye7834f82006-06-23 02:03:59 -07005173 retval = security_task_getscheduler(p);
5174 if (retval)
5175 goto out_unlock;
5176
Thomas Gleixner31605682009-12-08 20:24:16 +00005177 rq = task_rq_lock(p, &flags);
Rusty Russell96f874e2008-11-25 02:35:14 +10305178 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
Thomas Gleixner31605682009-12-08 20:24:16 +00005179 task_rq_unlock(rq, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180
5181out_unlock:
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005182 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005183 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184
Ulrich Drepper9531b622007-08-09 11:16:46 +02005185 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186}
5187
5188/**
5189 * sys_sched_getaffinity - get the cpu affinity of a process
5190 * @pid: pid of the process
5191 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5192 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5193 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005194SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5195 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005196{
5197 int ret;
Rusty Russellf17c8602008-11-25 02:35:11 +10305198 cpumask_var_t mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199
Anton Blanchard84fba5e2010-04-06 17:02:19 +10005200 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005201 return -EINVAL;
5202 if (len & (sizeof(unsigned long)-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203 return -EINVAL;
5204
Rusty Russellf17c8602008-11-25 02:35:11 +10305205 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5206 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207
Rusty Russellf17c8602008-11-25 02:35:11 +10305208 ret = sched_getaffinity(pid, mask);
5209 if (ret == 0) {
KOSAKI Motohiro8bc037f2010-03-17 09:36:58 +09005210 size_t retlen = min_t(size_t, len, cpumask_size());
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005211
5212 if (copy_to_user(user_mask_ptr, mask, retlen))
Rusty Russellf17c8602008-11-25 02:35:11 +10305213 ret = -EFAULT;
5214 else
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005215 ret = retlen;
Rusty Russellf17c8602008-11-25 02:35:11 +10305216 }
5217 free_cpumask_var(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005218
Rusty Russellf17c8602008-11-25 02:35:11 +10305219 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005220}
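/*
 * Illustrative user-space sketch (example only): the raw syscall returns
 * the number of bytes written to the user mask (retlen above); the glibc
 * wrapper hides this and returns 0 on success.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <sched.h>
#include <sys/syscall.h>
#include <unistd.h>

long raw_getaffinity(cpu_set_t *set)
{
	/* > 0 (bytes copied) on success, -1 with errno on failure */
	return syscall(SYS_sched_getaffinity, 0, sizeof(*set), set);
}
#endif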
5221
5222/**
5223 * sys_sched_yield - yield the current processor to other threads.
5224 *
Ingo Molnardd41f592007-07-09 18:51:59 +02005225 * This function yields the current CPU to other tasks. If there are no
5226 * other threads running on this CPU then this function will return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005227 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005228SYSCALL_DEFINE0(sched_yield)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005230 struct rq *rq = this_rq_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231
Ingo Molnar2d723762007-10-15 17:00:12 +02005232 schedstat_inc(rq, yld_count);
Dmitry Adamushko4530d7a2007-10-15 17:00:08 +02005233 current->sched_class->yield_task(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234
5235 /*
5236 * Since we are going to call schedule() anyway, there's
5237 * no need to preempt or enable interrupts:
5238 */
5239 __release(rq->lock);
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07005240 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Thomas Gleixner9828ea92009-12-03 20:55:53 +01005241 do_raw_spin_unlock(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242 preempt_enable_no_resched();
5243
5244 schedule();
5245
5246 return 0;
5247}
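/*
 * Illustrative user-space sketch (example only): yielding inside a
 * polling loop. Production code should block on a futex or condition
 * variable instead of spinning; "flag" is a hypothetical shared flag.
 */
#if 0	/* example only */
#include <sched.h>

extern volatile int flag;

void wait_for_flag(void)
{
	while (!flag)
		sched_yield();
}
#endif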
5248
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005249static inline int should_resched(void)
5250{
5251 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5252}
5253
Andrew Mortone7b38402006-06-30 01:56:00 -07005254static void __cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005255{
Frederic Weisbeckere7aaaa62009-07-16 15:44:29 +02005256 add_preempt_count(PREEMPT_ACTIVE);
5257 schedule();
5258 sub_preempt_count(PREEMPT_ACTIVE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259}
5260
Herbert Xu02b67cc32008-01-25 21:08:28 +01005261int __sched _cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005262{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005263 if (should_resched()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005264 __cond_resched();
5265 return 1;
5266 }
5267 return 0;
5268}
Herbert Xu02b67cc32008-01-25 21:08:28 +01005269EXPORT_SYMBOL(_cond_resched);
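/*
 * Illustrative kernel-side sketch (example only): long loops in process
 * context usually call cond_resched() each iteration so a pending
 * reschedule is honoured even on non-preemptible kernels. struct item
 * and process_item() are hypothetical.
 */
#if 0	/* example only */
static void process_all(struct item *items, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		process_item(&items[i]);
		cond_resched();
	}
}
#endif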
Linus Torvalds1da177e2005-04-16 15:20:36 -07005270
5271/*
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005272 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273 * call schedule, and on return reacquire the lock.
5274 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005275 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
Linus Torvalds1da177e2005-04-16 15:20:36 -07005276 * operations here to prevent schedule() from being called twice (once via
5277 * spin_unlock(), once by hand).
5278 */
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005279int __cond_resched_lock(spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005280{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005281 int resched = should_resched();
Jan Kara6df3cec2005-06-13 15:52:32 -07005282 int ret = 0;
5283
Peter Zijlstraf607c662009-07-20 19:16:29 +02005284 lockdep_assert_held(lock);
5285
Nick Piggin95c354f2008-01-30 13:31:20 +01005286 if (spin_needbreak(lock) || resched) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005287 spin_unlock(lock);
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005288 if (resched)
Nick Piggin95c354f2008-01-30 13:31:20 +01005289 __cond_resched();
5290 else
5291 cpu_relax();
Jan Kara6df3cec2005-06-13 15:52:32 -07005292 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005293 spin_lock(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005294 }
Jan Kara6df3cec2005-06-13 15:52:32 -07005295 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005296}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005297EXPORT_SYMBOL(__cond_resched_lock);
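/*
 * Illustrative kernel-side sketch (example only): draining a list under
 * a spinlock while periodically dropping the lock via cond_resched_lock()
 * so other CPUs and higher-priority tasks can get in. struct foo and
 * free_foo() are hypothetical.
 */
#if 0	/* example only */
static void drain_list(spinlock_t *lock, struct list_head *head)
{
	struct foo *f;

	spin_lock(lock);
	while (!list_empty(head)) {
		f = list_first_entry(head, struct foo, list);
		list_del(&f->list);
		free_foo(f);
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}
#endif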
Linus Torvalds1da177e2005-04-16 15:20:36 -07005298
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005299int __sched __cond_resched_softirq(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300{
5301 BUG_ON(!in_softirq());
5302
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005303 if (should_resched()) {
Thomas Gleixner98d825672007-05-23 13:58:18 -07005304 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305 __cond_resched();
5306 local_bh_disable();
5307 return 1;
5308 }
5309 return 0;
5310}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005311EXPORT_SYMBOL(__cond_resched_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005312
Linus Torvalds1da177e2005-04-16 15:20:36 -07005313/**
5314 * yield - yield the current processor to other threads.
5315 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08005316 * This is a shortcut for kernel-space yielding - it marks the
Linus Torvalds1da177e2005-04-16 15:20:36 -07005317 * thread runnable and calls sys_sched_yield().
5318 */
5319void __sched yield(void)
5320{
5321 set_current_state(TASK_RUNNING);
5322 sys_sched_yield();
5323}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005324EXPORT_SYMBOL(yield);
5325
5326/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005327 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328 * that process accounting knows that this is a task in IO wait state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005329 */
5330void __sched io_schedule(void)
5331{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005332 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005334 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005335 atomic_inc(&rq->nr_iowait);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005336 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005337 schedule();
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005338 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005339 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005340 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005341}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342EXPORT_SYMBOL(io_schedule);
5343
5344long __sched io_schedule_timeout(long timeout)
5345{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005346 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005347 long ret;
5348
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005349 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005350 atomic_inc(&rq->nr_iowait);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005351 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005352 ret = schedule_timeout(timeout);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005353 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005354 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005355 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005356 return ret;
5357}
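/*
 * Illustrative kernel-side sketch (example only): a waiter that sleeps
 * via io_schedule() so the blocked time is accounted as iowait.
 * my_wq and io_done() are hypothetical.
 */
#if 0	/* example only */
extern wait_queue_head_t my_wq;

static void wait_for_io(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (io_done())
			break;
		io_schedule();
	}
	finish_wait(&my_wq, &wait);
}
#endif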
5358
5359/**
5360 * sys_sched_get_priority_max - return maximum RT priority.
5361 * @policy: scheduling class.
5362 *
5363 * This syscall returns the maximum rt_priority that can be used
5364 * by a given scheduling class.
5365 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005366SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005367{
5368 int ret = -EINVAL;
5369
5370 switch (policy) {
5371 case SCHED_FIFO:
5372 case SCHED_RR:
5373 ret = MAX_USER_RT_PRIO-1;
5374 break;
5375 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005376 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005377 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005378 ret = 0;
5379 break;
5380 }
5381 return ret;
5382}
5383
5384/**
5385 * sys_sched_get_priority_min - return minimum RT priority.
5386 * @policy: scheduling class.
5387 *
5388 * This syscall returns the minimum rt_priority that can be used
5389 * by a given scheduling class.
5390 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005391SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005392{
5393 int ret = -EINVAL;
5394
5395 switch (policy) {
5396 case SCHED_FIFO:
5397 case SCHED_RR:
5398 ret = 1;
5399 break;
5400 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005401 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005402 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005403 ret = 0;
5404 }
5405 return ret;
5406}
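/*
 * Illustrative user-space sketch (example only): querying the static
 * priority range of a policy; per the switches above this prints 1..99
 * for SCHED_FIFO on Linux.
 */
#if 0	/* example only */
#include <sched.h>
#include <stdio.h>

void print_fifo_range(void)
{
	printf("SCHED_FIFO priority range: %d..%d\n",
	       sched_get_priority_min(SCHED_FIFO),
	       sched_get_priority_max(SCHED_FIFO));
}
#endif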
5407
5408/**
5409 * sys_sched_rr_get_interval - return the default timeslice of a process.
5410 * @pid: pid of the process.
5411 * @interval: userspace pointer to the timeslice value.
5412 *
5413 * This syscall writes the default timeslice value of a given process
5414 * into the user-space timespec buffer. A value of '0' means infinity.
5415 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01005416SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
Heiko Carstens754fe8d2009-01-14 14:14:09 +01005417 struct timespec __user *, interval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005418{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005419 struct task_struct *p;
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005420 unsigned int time_slice;
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005421 unsigned long flags;
5422 struct rq *rq;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005423 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424 struct timespec t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425
5426 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005427 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005428
5429 retval = -ESRCH;
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005430 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005431 p = find_process_by_pid(pid);
5432 if (!p)
5433 goto out_unlock;
5434
5435 retval = security_task_getscheduler(p);
5436 if (retval)
5437 goto out_unlock;
5438
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005439 rq = task_rq_lock(p, &flags);
5440 time_slice = p->sched_class->get_rr_interval(rq, p);
5441 task_rq_unlock(rq, &flags);
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005442
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005443 rcu_read_unlock();
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005444 jiffies_to_timespec(time_slice, &t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005446 return retval;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005447
Linus Torvalds1da177e2005-04-16 15:20:36 -07005448out_unlock:
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005449 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005450 return retval;
5451}
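/*
 * Illustrative user-space sketch (example only): reading the calling
 * thread's default timeslice. A 0/0 result means an effectively
 * infinite timeslice, e.g. for SCHED_FIFO.
 */
#if 0	/* example only */
#include <sched.h>
#include <stdio.h>
#include <time.h>

void print_timeslice(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)
		printf("timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
}
#endif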
5452
Steven Rostedt7c731e02008-05-12 21:20:41 +02005453static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005454
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005455void sched_show_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005456{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005457 unsigned long free = 0;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005458 unsigned state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005459
Linus Torvalds1da177e2005-04-16 15:20:36 -07005460 state = p->state ? __ffs(p->state) + 1 : 0;
Erik Gilling28d06862010-11-19 18:08:51 -08005461 printk(KERN_INFO "%-15.15s %c", p->comm,
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005462 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
Ingo Molnar4bd77322007-07-11 21:21:47 +02005463#if BITS_PER_LONG == 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07005464 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005465 printk(KERN_CONT " running ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005466 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005467 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005468#else
5469 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005470 printk(KERN_CONT " running task ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005471 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005472 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005473#endif
5474#ifdef CONFIG_DEBUG_STACK_USAGE
Eric Sandeen7c9f8862008-04-22 16:38:23 -05005475 free = stack_not_used(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005476#endif
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005477 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
David Rientjesaa47b7e2009-05-04 01:38:05 -07005478 task_pid_nr(p), task_pid_nr(p->real_parent),
5479 (unsigned long)task_thread_info(p)->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005480
Nick Piggin5fb5e6d2008-01-25 21:08:34 +01005481 show_stack(p, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005482}
5483
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005484void show_state_filter(unsigned long state_filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005485{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005486 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005487
Ingo Molnar4bd77322007-07-11 21:21:47 +02005488#if BITS_PER_LONG == 32
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005489 printk(KERN_INFO
5490 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005491#else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005492 printk(KERN_INFO
5493 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005494#endif
5495 read_lock(&tasklist_lock);
5496 do_each_thread(g, p) {
5497 /*
5498 * reset the NMI-timeout, listing all tasks on a slow
5499 * console might take a lot of time:
5500 */
5501 touch_nmi_watchdog();
Ingo Molnar39bc89f2007-04-25 20:50:03 -07005502 if (!state_filter || (p->state & state_filter))
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005503 sched_show_task(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005504 } while_each_thread(g, p);
5505
Jeremy Fitzhardinge04c91672007-05-08 00:28:05 -07005506 touch_all_softlockup_watchdogs();
5507
Ingo Molnardd41f592007-07-09 18:51:59 +02005508#ifdef CONFIG_SCHED_DEBUG
5509 sysrq_sched_debug_show();
5510#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005511 read_unlock(&tasklist_lock);
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005512 /*
5513 * Only show locks if all tasks are dumped:
5514 */
Shmulik Ladkani93335a22009-11-25 15:23:41 +02005515 if (!state_filter)
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005516 debug_show_all_locks();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005517}
5518
Ingo Molnar1df21052007-07-09 18:51:58 +02005519void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5520{
Ingo Molnardd41f592007-07-09 18:51:59 +02005521 idle->sched_class = &idle_sched_class;
Ingo Molnar1df21052007-07-09 18:51:58 +02005522}
5523
Ingo Molnarf340c0d2005-06-28 16:40:42 +02005524/**
5525 * init_idle - set up an idle thread for a given CPU
5526 * @idle: task in question
5527 * @cpu: cpu the idle task belongs to
5528 *
5529 * NOTE: this function does not set the idle thread's NEED_RESCHED
5530 * flag, to make booting more robust.
5531 */
Nick Piggin5c1e1762006-10-03 01:14:04 -07005532void __cpuinit init_idle(struct task_struct *idle, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005533{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005534 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005535 unsigned long flags;
5536
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005537 raw_spin_lock_irqsave(&rq->lock, flags);
Ingo Molnar5cbd54e2008-11-12 20:05:50 +01005538
Ingo Molnardd41f592007-07-09 18:51:59 +02005539 __sched_fork(idle);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01005540 idle->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02005541 idle->se.exec_start = sched_clock();
5542
Rusty Russell96f874e2008-11-25 02:35:14 +10305543 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005544 /*
5545 * We're having a chicken and egg problem, even though we are
5546 * holding rq->lock, the cpu isn't yet set to this cpu so the
5547 * lockdep check in task_group() will fail.
5548 *
5549 * Similar case to sched_fork(); alternatively we could
5550 * use task_rq_lock() here and obtain the other rq->lock.
5551 *
5552 * Silence PROVE_RCU
5553 */
5554 rcu_read_lock();
Ingo Molnardd41f592007-07-09 18:51:59 +02005555 __set_task_cpu(idle, cpu);
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005556 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005557
Linus Torvalds1da177e2005-04-16 15:20:36 -07005558 rq->curr = rq->idle = idle;
Nick Piggin4866cde2005-06-25 14:57:23 -07005559#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
5560 idle->oncpu = 1;
5561#endif
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005562 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005563
5564 /* Set the preempt count _outside_ the spinlocks! */
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005565#if defined(CONFIG_PREEMPT)
5566 task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
5567#else
Al Viroa1261f52005-11-13 16:06:55 -08005568 task_thread_info(idle)->preempt_count = 0;
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005569#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02005570 /*
5571 * The idle tasks have their own, simple scheduling class:
5572 */
5573 idle->sched_class = &idle_sched_class;
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01005574 ftrace_graph_init_task(idle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005575}
5576
5577/*
5578 * In a system that switches off the HZ timer nohz_cpu_mask
5579 * indicates which cpus entered this state. This is used
5580 * in the rcu update to wait only for active cpus. For systems
5581 * which do not switch off the HZ timer nohz_cpu_mask should
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305582 * always be CPU_BITS_NONE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005583 */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305584cpumask_var_t nohz_cpu_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005585
Ingo Molnar19978ca2007-11-09 22:39:38 +01005586/*
5587 * Increase the granularity value when there are more CPUs,
5588 * because with more CPUs the 'effective latency' as visible
5589 * to users decreases. But the relationship is not linear,
5590 * so pick a second-best guess by going with the log2 of the
5591 * number of CPUs.
5592 *
5593 * This idea comes from the SD scheduler of Con Kolivas:
5594 */
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005595static int get_update_sysctl_factor(void)
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005596{
Mike Galbraith4ca3ef72009-12-10 09:25:53 +01005597 unsigned int cpus = min_t(int, num_online_cpus(), 8);
Christian Ehrhardt1983a922009-11-30 12:16:47 +01005598 unsigned int factor;
5599
5600 switch (sysctl_sched_tunable_scaling) {
5601 case SCHED_TUNABLESCALING_NONE:
5602 factor = 1;
5603 break;
5604 case SCHED_TUNABLESCALING_LINEAR:
5605 factor = cpus;
5606 break;
5607 case SCHED_TUNABLESCALING_LOG:
5608 default:
5609 factor = 1 + ilog2(cpus);
5610 break;
5611 }
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005612
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005613 return factor;
5614}
5615
5616static void update_sysctl(void)
5617{
5618 unsigned int factor = get_update_sysctl_factor();
5619
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005620#define SET_SYSCTL(name) \
5621 (sysctl_##name = (factor) * normalized_sysctl_##name)
5622 SET_SYSCTL(sched_min_granularity);
5623 SET_SYSCTL(sched_latency);
5624 SET_SYSCTL(sched_wakeup_granularity);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005625#undef SET_SYSCTL
5626}
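/*
 * Illustrative expansion (example only): with 4 CPUs online and the
 * default SCHED_TUNABLESCALING_LOG policy, factor = 1 + ilog2(4) = 3,
 * so SET_SYSCTL(sched_latency) above expands to
 *
 *	sysctl_sched_latency = 3 * normalized_sysctl_sched_latency;
 *
 * i.e. each tunable is its boot-time normalized value scaled by the
 * (log of the) CPU count.
 */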
5627
Ingo Molnar19978ca2007-11-09 22:39:38 +01005628static inline void sched_init_granularity(void)
5629{
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005630 update_sysctl();
Ingo Molnar19978ca2007-11-09 22:39:38 +01005631}
5632
Linus Torvalds1da177e2005-04-16 15:20:36 -07005633#ifdef CONFIG_SMP
5634/*
5635 * This is how migration works:
5636 *
Tejun Heo969c7922010-05-06 18:49:21 +02005637 * 1) we invoke migration_cpu_stop() on the target CPU using
5638 * stop_one_cpu().
5639 * 2) stopper starts to run (implicitly forcing the migrated thread
5640 * off the CPU)
5641 * 3) it checks whether the migrated task is still in the wrong runqueue.
5642 * 4) if it's in the wrong runqueue then the migration thread removes
Linus Torvalds1da177e2005-04-16 15:20:36 -07005643 * it and puts it into the right queue.
Tejun Heo969c7922010-05-06 18:49:21 +02005644 * 5) stopper completes and stop_one_cpu() returns and the migration
5645 * is done.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005646 */
5647
5648/*
5649 * Change a given task's CPU affinity. Migrate the thread to a
5650 * proper CPU and schedule it away if the CPU it's executing on
5651 * is removed from the allowed bitmask.
5652 *
5653 * NOTE: the caller must have a valid reference to the task, the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005654 * task must not exit() & deallocate itself prematurely. The
Linus Torvalds1da177e2005-04-16 15:20:36 -07005655 * call is not atomic; no spinlocks may be held.
5656 */
Rusty Russell96f874e2008-11-25 02:35:14 +10305657int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005658{
5659 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005660 struct rq *rq;
Tejun Heo969c7922010-05-06 18:49:21 +02005661 unsigned int dest_cpu;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005662 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005663
Peter Zijlstra65cc8e42010-03-25 21:05:16 +01005664 /*
5665 * Serialize against TASK_WAKING so that ttwu() and wunt() can
5666 * drop the rq->lock and still rely on ->cpus_allowed.
5667 */
5668again:
5669 while (task_is_waking(p))
5670 cpu_relax();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671 rq = task_rq_lock(p, &flags);
Peter Zijlstra65cc8e42010-03-25 21:05:16 +01005672 if (task_is_waking(p)) {
5673 task_rq_unlock(rq, &flags);
5674 goto again;
5675 }
Peter Zijlstrae2912002009-12-16 18:04:36 +01005676
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01005677 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005678 ret = -EINVAL;
5679 goto out;
5680 }
5681
David Rientjes9985b0b2008-06-05 12:57:11 -07005682 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
Rusty Russell96f874e2008-11-25 02:35:14 +10305683 !cpumask_equal(&p->cpus_allowed, new_mask))) {
David Rientjes9985b0b2008-06-05 12:57:11 -07005684 ret = -EINVAL;
5685 goto out;
5686 }
5687
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005688 if (p->sched_class->set_cpus_allowed)
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005689 p->sched_class->set_cpus_allowed(p, new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005690 else {
Rusty Russell96f874e2008-11-25 02:35:14 +10305691 cpumask_copy(&p->cpus_allowed, new_mask);
5692 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005693 }
5694
Linus Torvalds1da177e2005-04-16 15:20:36 -07005695 /* Can the task run on the task's current CPU? If so, we're done */
Rusty Russell96f874e2008-11-25 02:35:14 +10305696 if (cpumask_test_cpu(task_cpu(p), new_mask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005697 goto out;
5698
Tejun Heo969c7922010-05-06 18:49:21 +02005699 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
Nikanth Karthikesanb7a2b392010-11-26 12:37:09 +05305700 if (migrate_task(p, rq)) {
Tejun Heo969c7922010-05-06 18:49:21 +02005701 struct migration_arg arg = { p, dest_cpu };
Linus Torvalds1da177e2005-04-16 15:20:36 -07005702 /* Need help from migration thread: drop lock and wait. */
5703 task_rq_unlock(rq, &flags);
Tejun Heo969c7922010-05-06 18:49:21 +02005704 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005705 tlb_migrate_finish(p->mm);
5706 return 0;
5707 }
5708out:
5709 task_rq_unlock(rq, &flags);
Ingo Molnar48f24c42006-07-03 00:25:40 -07005710
Linus Torvalds1da177e2005-04-16 15:20:36 -07005711 return ret;
5712}
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005713EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
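/*
 * Illustrative kernel-side sketch (example only): restricting a kernel
 * thread to a single CPU with this interface.
 */
#if 0	/* example only */
static int bind_worker(struct task_struct *tsk, int cpu)
{
	return set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
}
#endif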
Linus Torvalds1da177e2005-04-16 15:20:36 -07005714
5715/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005716 * Move (not current) task off this cpu, onto dest cpu. We're doing
Linus Torvalds1da177e2005-04-16 15:20:36 -07005717 * this because either it can't run here any more (set_cpus_allowed()
5718 * away from this CPU, or CPU going down), or because we're
5719 * attempting to rebalance this task on exec (sched_exec).
5720 *
5721 * So we race with normal scheduler movements, but that's OK, as long
5722 * as the task is no longer on this CPU.
Kirill Korotaevefc30812006-06-27 02:54:32 -07005723 *
5724 * Returns non-zero if task was successfully migrated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005725 */
Kirill Korotaevefc30812006-06-27 02:54:32 -07005726static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005727{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005728 struct rq *rq_dest, *rq_src;
Peter Zijlstrae2912002009-12-16 18:04:36 +01005729 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005730
Max Krasnyanskye761b772008-07-15 04:43:49 -07005731 if (unlikely(!cpu_active(dest_cpu)))
Kirill Korotaevefc30812006-06-27 02:54:32 -07005732 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005733
5734 rq_src = cpu_rq(src_cpu);
5735 rq_dest = cpu_rq(dest_cpu);
5736
5737 double_rq_lock(rq_src, rq_dest);
5738 /* Already moved. */
5739 if (task_cpu(p) != src_cpu)
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005740 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005741 /* Affinity changed (again). */
Rusty Russell96f874e2008-11-25 02:35:14 +10305742 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005743 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005744
Peter Zijlstrae2912002009-12-16 18:04:36 +01005745 /*
5746 * If we're not on a rq, the next wake-up will ensure we're
5747 * placed properly.
5748 */
5749 if (p->se.on_rq) {
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005750 deactivate_task(rq_src, p, 0);
Peter Zijlstrae2912002009-12-16 18:04:36 +01005751 set_task_cpu(p, dest_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005752 activate_task(rq_dest, p, 0);
Peter Zijlstra15afe092008-09-20 23:38:02 +02005753 check_preempt_curr(rq_dest, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005754 }
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005755done:
Kirill Korotaevefc30812006-06-27 02:54:32 -07005756 ret = 1;
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005757fail:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005758 double_rq_unlock(rq_src, rq_dest);
Kirill Korotaevefc30812006-06-27 02:54:32 -07005759 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005760}
5761
5762/*
Tejun Heo969c7922010-05-06 18:49:21 +02005763 * migration_cpu_stop - this will be executed by a highprio stopper thread
5764 * and performs thread migration by bumping thread off CPU then
5765 * 'pushing' onto another runqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005766 */
Tejun Heo969c7922010-05-06 18:49:21 +02005767static int migration_cpu_stop(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005768{
Tejun Heo969c7922010-05-06 18:49:21 +02005769 struct migration_arg *arg = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005770
Tejun Heo969c7922010-05-06 18:49:21 +02005771 /*
5772 * The original target cpu might have gone down and we might
5773 * be on another cpu but it doesn't matter.
5774 */
5775 local_irq_disable();
5776 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
5777 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005778 return 0;
5779}
5780
5781#ifdef CONFIG_HOTPLUG_CPU
Linus Torvalds1da177e2005-04-16 15:20:36 -07005782
Ingo Molnar48f24c42006-07-03 00:25:40 -07005783/*
5784 * Ensures that the idle task is using init_mm right before its cpu goes
Linus Torvalds1da177e2005-04-16 15:20:36 -07005785 * offline.
5786 */
5787void idle_task_exit(void)
5788{
5789 struct mm_struct *mm = current->active_mm;
5790
5791 BUG_ON(cpu_online(smp_processor_id()));
5792
5793 if (mm != &init_mm)
5794 switch_mm(mm, &init_mm, current);
5795 mmdrop(mm);
5796}
5797
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01005798/*
5799 * While a dead CPU has no uninterruptible tasks queued at this point,
5800 * it might still have a nonzero ->nr_uninterruptible counter, because
5801 * for performance reasons the counter is not strictly tracking tasks to
5802 * their home CPUs. So we just add the counter to another CPU's counter,
5803 * to keep the global sum constant after CPU-down:
5804 */
5805static void migrate_nr_uninterruptible(struct rq *rq_src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005806{
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01005807 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005808
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01005809 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
5810 rq_src->nr_uninterruptible = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005811}
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02005812
5813/*
5814 * remove the tasks which were accounted by rq from calc_load_tasks.
5815 */
5816static void calc_global_load_remove(struct rq *rq)
5817{
5818 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
Thomas Gleixnera468d382009-07-17 14:15:46 +02005819 rq->calc_load_active = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02005820}
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01005821
5822/*
5823 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5824 * try_to_wake_up()->select_task_rq().
5825 *
5826 * Called with rq->lock held even though we're in stop_machine() and
5827 * there's no concurrency possible, we hold the required locks anyway
5828 * because of lock validation efforts.
5829 */
5830static void migrate_tasks(unsigned int dead_cpu)
5831{
5832 struct rq *rq = cpu_rq(dead_cpu);
5833 struct task_struct *next, *stop = rq->stop;
5834 int dest_cpu;
5835
5836 /*
5837 * Fudge the rq selection such that the below task selection loop
5838 * doesn't get stuck on the currently eligible stop task.
5839 *
5840 * We're currently inside stop_machine() and the rq is either stuck
5841 * in the stop_machine_cpu_stop() loop, or we're executing this code,
5842 * either way we should never end up calling schedule() until we're
5843 * done here.
5844 */
5845 rq->stop = NULL;
5846
5847 for ( ; ; ) {
5848 /*
5849 * There's this thread running, bail when that's the only
5850 * remaining thread.
5851 */
5852 if (rq->nr_running == 1)
5853 break;
5854
5855 next = pick_next_task(rq);
5856 BUG_ON(!next);
5857 next->sched_class->put_prev_task(rq, next);
5858
5859 /* Find suitable destination for @next, with force if needed. */
5860 dest_cpu = select_fallback_rq(dead_cpu, next);
5861 raw_spin_unlock(&rq->lock);
5862
5863 __migrate_task(next, dead_cpu, dest_cpu);
5864
5865 raw_spin_lock(&rq->lock);
5866 }
5867
5868 rq->stop = stop;
5869}
5870
Linus Torvalds1da177e2005-04-16 15:20:36 -07005871#endif /* CONFIG_HOTPLUG_CPU */
5872
Nick Piggine692ab52007-07-26 13:40:43 +02005873#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5874
5875static struct ctl_table sd_ctl_dir[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02005876 {
5877 .procname = "sched_domain",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005878 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02005879 },
Eric W. Biederman56992302009-11-05 15:38:40 -08005880 {}
Nick Piggine692ab52007-07-26 13:40:43 +02005881};
5882
5883static struct ctl_table sd_ctl_root[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02005884 {
5885 .procname = "kernel",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005886 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02005887 .child = sd_ctl_dir,
5888 },
Eric W. Biederman56992302009-11-05 15:38:40 -08005889 {}
Nick Piggine692ab52007-07-26 13:40:43 +02005890};
5891
5892static struct ctl_table *sd_alloc_ctl_entry(int n)
5893{
5894 struct ctl_table *entry =
Milton Miller5cf9f062007-10-15 17:00:19 +02005895 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
Nick Piggine692ab52007-07-26 13:40:43 +02005896
Nick Piggine692ab52007-07-26 13:40:43 +02005897 return entry;
5898}
5899
Milton Miller6382bc92007-10-15 17:00:19 +02005900static void sd_free_ctl_entry(struct ctl_table **tablep)
5901{
Milton Millercd7900762007-10-17 16:55:11 +02005902 struct ctl_table *entry;
Milton Miller6382bc92007-10-15 17:00:19 +02005903
Milton Millercd7900762007-10-17 16:55:11 +02005904 /*
5905 * In the intermediate directories, both the child directory and
5906 * procname are dynamically allocated and could fail but the mode
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005907 * will always be set. In the lowest directory the names are
Milton Millercd7900762007-10-17 16:55:11 +02005908 * static strings and all have proc handlers.
5909 */
5910 for (entry = *tablep; entry->mode; entry++) {
Milton Miller6382bc92007-10-15 17:00:19 +02005911 if (entry->child)
5912 sd_free_ctl_entry(&entry->child);
Milton Millercd7900762007-10-17 16:55:11 +02005913 if (entry->proc_handler == NULL)
5914 kfree(entry->procname);
5915 }
Milton Miller6382bc92007-10-15 17:00:19 +02005916
5917 kfree(*tablep);
5918 *tablep = NULL;
5919}
5920
Nick Piggine692ab52007-07-26 13:40:43 +02005921static void
Alexey Dobriyane0361852007-08-09 11:16:46 +02005922set_table_entry(struct ctl_table *entry,
Nick Piggine692ab52007-07-26 13:40:43 +02005923 const char *procname, void *data, int maxlen,
5924 mode_t mode, proc_handler *proc_handler)
5925{
Nick Piggine692ab52007-07-26 13:40:43 +02005926 entry->procname = procname;
5927 entry->data = data;
5928 entry->maxlen = maxlen;
5929 entry->mode = mode;
5930 entry->proc_handler = proc_handler;
5931}
5932
5933static struct ctl_table *
5934sd_alloc_ctl_domain_table(struct sched_domain *sd)
5935{
Ingo Molnara5d8c342008-10-09 11:35:51 +02005936 struct ctl_table *table = sd_alloc_ctl_entry(13);
Nick Piggine692ab52007-07-26 13:40:43 +02005937
Milton Millerad1cdc12007-10-15 17:00:19 +02005938 if (table == NULL)
5939 return NULL;
5940
Alexey Dobriyane0361852007-08-09 11:16:46 +02005941 set_table_entry(&table[0], "min_interval", &sd->min_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02005942 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005943 set_table_entry(&table[1], "max_interval", &sd->max_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02005944 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005945 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005946 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005947 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005948 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005949 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005950 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005951 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005952 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005953 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02005954 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005955 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
Nick Piggine692ab52007-07-26 13:40:43 +02005956 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02005957 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
Nick Piggine692ab52007-07-26 13:40:43 +02005958 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02005959 set_table_entry(&table[9], "cache_nice_tries",
Nick Piggine692ab52007-07-26 13:40:43 +02005960 &sd->cache_nice_tries,
5961 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02005962 set_table_entry(&table[10], "flags", &sd->flags,
Nick Piggine692ab52007-07-26 13:40:43 +02005963 sizeof(int), 0644, proc_dointvec_minmax);
Ingo Molnara5d8c342008-10-09 11:35:51 +02005964 set_table_entry(&table[11], "name", sd->name,
5965 CORENAME_MAX_SIZE, 0444, proc_dostring);
5966 /* &table[12] is terminator */
Nick Piggine692ab52007-07-26 13:40:43 +02005967
5968 return table;
5969}
5970
Ingo Molnar9a4e7152007-11-28 15:52:56 +01005971static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
Nick Piggine692ab52007-07-26 13:40:43 +02005972{
5973 struct ctl_table *entry, *table;
5974 struct sched_domain *sd;
5975 int domain_num = 0, i;
5976 char buf[32];
5977
5978 for_each_domain(cpu, sd)
5979 domain_num++;
5980 entry = table = sd_alloc_ctl_entry(domain_num + 1);
Milton Millerad1cdc12007-10-15 17:00:19 +02005981 if (table == NULL)
5982 return NULL;
Nick Piggine692ab52007-07-26 13:40:43 +02005983
5984 i = 0;
5985 for_each_domain(cpu, sd) {
5986 snprintf(buf, 32, "domain%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02005987 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02005988 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02005989 entry->child = sd_alloc_ctl_domain_table(sd);
5990 entry++;
5991 i++;
5992 }
5993 return table;
5994}
5995
5996static struct ctl_table_header *sd_sysctl_header;
Milton Miller6382bc92007-10-15 17:00:19 +02005997static void register_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02005998{
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01005999 int i, cpu_num = num_possible_cpus();
Nick Piggine692ab52007-07-26 13:40:43 +02006000 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
6001 char buf[32];
6002
Milton Miller73785472007-10-24 18:23:48 +02006003 WARN_ON(sd_ctl_dir[0].child);
6004 sd_ctl_dir[0].child = entry;
6005
Milton Millerad1cdc12007-10-15 17:00:19 +02006006 if (entry == NULL)
6007 return;
6008
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006009 for_each_possible_cpu(i) {
Nick Piggine692ab52007-07-26 13:40:43 +02006010 snprintf(buf, 32, "cpu%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02006011 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006012 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02006013 entry->child = sd_alloc_ctl_cpu_table(i);
Milton Miller97b6ea72007-10-15 17:00:19 +02006014 entry++;
Nick Piggine692ab52007-07-26 13:40:43 +02006015 }
Milton Miller73785472007-10-24 18:23:48 +02006016
6017 WARN_ON(sd_sysctl_header);
Nick Piggine692ab52007-07-26 13:40:43 +02006018 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
6019}
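/*
 * Illustrative result (example only): on a machine with at least one
 * sched-domain level the code above creates entries such as
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/flags
 *
 * one "cpuN" directory per possible CPU, one "domainN" child per level.
 */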
Milton Miller6382bc92007-10-15 17:00:19 +02006020
Milton Miller73785472007-10-24 18:23:48 +02006021/* may be called multiple times per register */
Milton Miller6382bc92007-10-15 17:00:19 +02006022static void unregister_sched_domain_sysctl(void)
6023{
Milton Miller73785472007-10-24 18:23:48 +02006024 if (sd_sysctl_header)
6025 unregister_sysctl_table(sd_sysctl_header);
Milton Miller6382bc92007-10-15 17:00:19 +02006026 sd_sysctl_header = NULL;
Milton Miller73785472007-10-24 18:23:48 +02006027 if (sd_ctl_dir[0].child)
6028 sd_free_ctl_entry(&sd_ctl_dir[0].child);
Milton Miller6382bc92007-10-15 17:00:19 +02006029}
Nick Piggine692ab52007-07-26 13:40:43 +02006030#else
Milton Miller6382bc92007-10-15 17:00:19 +02006031static void register_sched_domain_sysctl(void)
6032{
6033}
6034static void unregister_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02006035{
6036}
6037#endif
6038
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006039static void set_rq_online(struct rq *rq)
6040{
6041 if (!rq->online) {
6042 const struct sched_class *class;
6043
Rusty Russellc6c49272008-11-25 02:35:05 +10306044 cpumask_set_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006045 rq->online = 1;
6046
6047 for_each_class(class) {
6048 if (class->rq_online)
6049 class->rq_online(rq);
6050 }
6051 }
6052}
6053
6054static void set_rq_offline(struct rq *rq)
6055{
6056 if (rq->online) {
6057 const struct sched_class *class;
6058
6059 for_each_class(class) {
6060 if (class->rq_offline)
6061 class->rq_offline(rq);
6062 }
6063
Rusty Russellc6c49272008-11-25 02:35:05 +10306064 cpumask_clear_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006065 rq->online = 0;
6066 }
6067}
6068
Linus Torvalds1da177e2005-04-16 15:20:36 -07006069/*
6070 * migration_call - callback that gets triggered when a CPU is added.
6071 * Here we can start up the necessary migration thread for the new CPU.
6072 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006073static int __cpuinit
6074migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006075{
Ingo Molnar48f24c42006-07-03 00:25:40 -07006076 int cpu = (long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006077 unsigned long flags;
Tejun Heo969c7922010-05-06 18:49:21 +02006078 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006079
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006080 switch (action & ~CPU_TASKS_FROZEN) {
Gautham R Shenoy5be93612007-05-09 02:34:04 -07006081
Linus Torvalds1da177e2005-04-16 15:20:36 -07006082 case CPU_UP_PREPARE:
Thomas Gleixnera468d382009-07-17 14:15:46 +02006083 rq->calc_load_update = calc_load_update;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006084 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006085
Linus Torvalds1da177e2005-04-16 15:20:36 -07006086 case CPU_ONLINE:
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006087 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006088 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006089 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306090 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006091
6092 set_rq_online(rq);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006093 }
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006094 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006095 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006096
Linus Torvalds1da177e2005-04-16 15:20:36 -07006097#ifdef CONFIG_HOTPLUG_CPU
Gregory Haskins08f503b2008-03-10 17:59:11 -04006098 case CPU_DYING:
Gregory Haskins57d885f2008-01-25 21:08:18 +01006099 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006100 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006101 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306102 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006103 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006104 }
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006105 migrate_tasks(cpu);
6106 BUG_ON(rq->nr_running != 1); /* the migration thread */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006107 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006108
6109 migrate_nr_uninterruptible(rq);
6110 calc_global_load_remove(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006111 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006112#endif
6113 }
6114 return NOTIFY_OK;
6115}
6116
Paul Mackerrasf38b0822009-06-02 21:05:16 +10006117/*
6118 * Register at high priority so that task migration (migrate_all_tasks)
6119 * happens before everything else. This has to be lower priority than
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006120 * the notifier in the perf_event subsystem, though.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006121 */
Chandra Seetharaman26c21432006-06-27 02:54:10 -07006122static struct notifier_block __cpuinitdata migration_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006123 .notifier_call = migration_call,
Tejun Heo50a323b2010-06-08 21:40:36 +02006124 .priority = CPU_PRI_MIGRATION,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006125};
6126
Tejun Heo3a101d02010-06-08 21:40:36 +02006127static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
6128 unsigned long action, void *hcpu)
6129{
6130 switch (action & ~CPU_TASKS_FROZEN) {
6131 case CPU_ONLINE:
6132 case CPU_DOWN_FAILED:
6133 set_cpu_active((long)hcpu, true);
6134 return NOTIFY_OK;
6135 default:
6136 return NOTIFY_DONE;
6137 }
6138}
6139
6140static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6141 unsigned long action, void *hcpu)
6142{
6143 switch (action & ~CPU_TASKS_FROZEN) {
6144 case CPU_DOWN_PREPARE:
6145 set_cpu_active((long)hcpu, false);
6146 return NOTIFY_OK;
6147 default:
6148 return NOTIFY_DONE;
6149 }
6150}
6151
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006152static int __init migration_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006153{
6154 void *cpu = (void *)(long)smp_processor_id();
Akinobu Mita07dccf32006-09-29 02:00:22 -07006155 int err;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006156
Tejun Heo3a101d02010-06-08 21:40:36 +02006157 /* Initialize migration for the boot CPU */
Akinobu Mita07dccf32006-09-29 02:00:22 -07006158 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6159 BUG_ON(err == NOTIFY_BAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006160 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6161 register_cpu_notifier(&migration_notifier);
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006162
Tejun Heo3a101d02010-06-08 21:40:36 +02006163 /* Register cpu active notifiers */
6164 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6165 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6166
Thomas Gleixnera004cd42009-07-21 09:54:05 +02006167 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006168}
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006169early_initcall(migration_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006170#endif
6171
6172#ifdef CONFIG_SMP
Christoph Lameter476f3532007-05-06 14:48:58 -07006173
Ingo Molnar3e9830d2007-10-15 17:00:13 +02006174#ifdef CONFIG_SCHED_DEBUG
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006175
Mike Travisf6630112009-11-17 18:22:15 -06006176static __read_mostly int sched_domain_debug_enabled;
6177
6178static int __init sched_domain_debug_setup(char *str)
6179{
6180 sched_domain_debug_enabled = 1;
6181
6182 return 0;
6183}
6184early_param("sched_debug", sched_domain_debug_setup);
6185
Mike Travis7c16ec52008-04-04 18:11:11 -07006186static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
Rusty Russell96f874e2008-11-25 02:35:14 +10306187 struct cpumask *groupmask)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006188{
6189 struct sched_group *group = sd->groups;
Mike Travis434d53b2008-04-04 18:11:04 -07006190 char str[256];
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006191
Rusty Russell968ea6d2008-12-13 21:55:51 +10306192 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
Rusty Russell96f874e2008-11-25 02:35:14 +10306193 cpumask_clear(groupmask);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006194
6195 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6196
6197 if (!(sd->flags & SD_LOAD_BALANCE)) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006198 printk("does not load-balance\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006199 if (sd->parent)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006200 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6201 " has parent");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006202 return -1;
6203 }
6204
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006205 printk(KERN_CONT "span %s level %s\n", str, sd->name);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006206
Rusty Russell758b2cd2008-11-25 02:35:04 +10306207 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006208 printk(KERN_ERR "ERROR: domain->span does not contain "
6209 "CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006210 }
Rusty Russell758b2cd2008-11-25 02:35:04 +10306211 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006212 printk(KERN_ERR "ERROR: domain->groups does not contain"
6213 " CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006214 }
6215
6216 printk(KERN_DEBUG "%*s groups:", level + 1, "");
6217 do {
6218 if (!group) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006219 printk("\n");
6220 printk(KERN_ERR "ERROR: group is NULL\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006221 break;
6222 }
6223
Peter Zijlstra18a38852009-09-01 10:34:39 +02006224 if (!group->cpu_power) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006225 printk(KERN_CONT "\n");
6226 printk(KERN_ERR "ERROR: domain->cpu_power not "
6227 "set\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006228 break;
6229 }
6230
Rusty Russell758b2cd2008-11-25 02:35:04 +10306231 if (!cpumask_weight(sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006232 printk(KERN_CONT "\n");
6233 printk(KERN_ERR "ERROR: empty group\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006234 break;
6235 }
6236
Rusty Russell758b2cd2008-11-25 02:35:04 +10306237 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006238 printk(KERN_CONT "\n");
6239 printk(KERN_ERR "ERROR: repeated CPUs\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006240 break;
6241 }
6242
Rusty Russell758b2cd2008-11-25 02:35:04 +10306243 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006244
Rusty Russell968ea6d2008-12-13 21:55:51 +10306245 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306246
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006247 printk(KERN_CONT " %s", str);
Peter Zijlstra18a38852009-09-01 10:34:39 +02006248 if (group->cpu_power != SCHED_LOAD_SCALE) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006249 printk(KERN_CONT " (cpu_power = %d)",
6250 group->cpu_power);
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306251 }
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006252
6253 group = group->next;
6254 } while (group != sd->groups);
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006255 printk(KERN_CONT "\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006256
Rusty Russell758b2cd2008-11-25 02:35:04 +10306257 if (!cpumask_equal(sched_domain_span(sd), groupmask))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006258 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006259
Rusty Russell758b2cd2008-11-25 02:35:04 +10306260 if (sd->parent &&
6261 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006262 printk(KERN_ERR "ERROR: parent span is not a superset "
6263 "of domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006264 return 0;
6265}
6266
Linus Torvalds1da177e2005-04-16 15:20:36 -07006267static void sched_domain_debug(struct sched_domain *sd, int cpu)
6268{
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306269 cpumask_var_t groupmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006270 int level = 0;
6271
Mike Travisf6630112009-11-17 18:22:15 -06006272 if (!sched_domain_debug_enabled)
6273 return;
6274
Nick Piggin41c7ce92005-06-25 14:57:24 -07006275 if (!sd) {
6276 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6277 return;
6278 }
6279
Linus Torvalds1da177e2005-04-16 15:20:36 -07006280 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6281
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306282 if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006283 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
6284 return;
6285 }
6286
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006287 for (;;) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006288 if (sched_domain_debug_one(sd, cpu, level, groupmask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006289 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006290 level++;
6291 sd = sd->parent;
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08006292 if (!sd)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006293 break;
6294 }
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306295 free_cpumask_var(groupmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006296}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006297#else /* !CONFIG_SCHED_DEBUG */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006298# define sched_domain_debug(sd, cpu) do { } while (0)
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006299#endif /* CONFIG_SCHED_DEBUG */
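/*
 * For reference, a sketch of what the debug walker above prints when the
 * kernel is booted with "sched_debug" (illustrative only: this assumes a
 * hypothetical 4-CPU box with two SMT siblings per core; actual spans,
 * level names and cpu_power values depend on the topology):
 *
 *	CPU0 attaching sched-domain:
 *	 domain 0: span 0-1 level SIBLING
 *	  groups: 0 (cpu_power = 589) 1 (cpu_power = 589)
 *	  domain 1: span 0-3 level MC
 *	   groups: 0-1 (cpu_power = 1178) 2-3 (cpu_power = 1178)
 */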
Linus Torvalds1da177e2005-04-16 15:20:36 -07006300
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006301static int sd_degenerate(struct sched_domain *sd)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006302{
Rusty Russell758b2cd2008-11-25 02:35:04 +10306303 if (cpumask_weight(sched_domain_span(sd)) == 1)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006304 return 1;
6305
6306 /* Following flags need at least 2 groups */
6307 if (sd->flags & (SD_LOAD_BALANCE |
6308 SD_BALANCE_NEWIDLE |
6309 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006310 SD_BALANCE_EXEC |
6311 SD_SHARE_CPUPOWER |
6312 SD_SHARE_PKG_RESOURCES)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006313 if (sd->groups != sd->groups->next)
6314 return 0;
6315 }
6316
6317 /* Following flags don't use groups */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02006318 if (sd->flags & (SD_WAKE_AFFINE))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006319 return 0;
6320
6321 return 1;
6322}
6323
Ingo Molnar48f24c42006-07-03 00:25:40 -07006324static int
6325sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006326{
6327 unsigned long cflags = sd->flags, pflags = parent->flags;
6328
6329 if (sd_degenerate(parent))
6330 return 1;
6331
Rusty Russell758b2cd2008-11-25 02:35:04 +10306332 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006333 return 0;
6334
Suresh Siddha245af2c2005-06-25 14:57:25 -07006335 /* Flags needing groups don't count if only 1 group in parent */
6336 if (parent->groups == parent->groups->next) {
6337 pflags &= ~(SD_LOAD_BALANCE |
6338 SD_BALANCE_NEWIDLE |
6339 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006340 SD_BALANCE_EXEC |
6341 SD_SHARE_CPUPOWER |
6342 SD_SHARE_PKG_RESOURCES);
Ken Chen54364992008-12-07 18:47:37 -08006343 if (nr_node_ids == 1)
6344 pflags &= ~SD_SERIALIZE;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006345 }
6346 if (~cflags & pflags)
6347 return 0;
6348
6349 return 1;
6350}
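/*
 * A worked example of the test above, with hypothetical flag values: say
 * a child domain has SD_LOAD_BALANCE|SD_BALANCE_EXEC, and its parent
 * spans the same cpus, has a single group, and carries
 * SD_LOAD_BALANCE|SD_SERIALIZE on a one-node box.  The single-group case
 * clears the group-dependent flags and SD_SERIALIZE from pflags, leaving
 * pflags == 0, so ~cflags & pflags == 0 and the parent is reported
 * degenerate: it would balance over exactly the cpus the child already
 * covers, and cpu_attach_domain() folds it away.
 */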
6351
Rusty Russellc6c49272008-11-25 02:35:05 +10306352static void free_rootdomain(struct root_domain *rd)
6353{
Peter Zijlstra047106a2009-11-16 10:28:09 +01006354 synchronize_sched();
6355
Rusty Russell68e74562008-11-25 02:35:13 +10306356 cpupri_cleanup(&rd->cpupri);
6357
Rusty Russellc6c49272008-11-25 02:35:05 +10306358 free_cpumask_var(rd->rto_mask);
6359 free_cpumask_var(rd->online);
6360 free_cpumask_var(rd->span);
6361 kfree(rd);
6362}
6363
Gregory Haskins57d885f2008-01-25 21:08:18 +01006364static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6365{
Ingo Molnara0490fa2009-02-12 11:35:40 +01006366 struct root_domain *old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006367 unsigned long flags;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006368
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006369 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006370
6371 if (rq->rd) {
Ingo Molnara0490fa2009-02-12 11:35:40 +01006372 old_rd = rq->rd;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006373
Rusty Russellc6c49272008-11-25 02:35:05 +10306374 if (cpumask_test_cpu(rq->cpu, old_rd->online))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006375 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006376
Rusty Russellc6c49272008-11-25 02:35:05 +10306377 cpumask_clear_cpu(rq->cpu, old_rd->span);
Gregory Haskinsdc938522008-01-25 21:08:26 +01006378
Ingo Molnara0490fa2009-02-12 11:35:40 +01006379 /*
6380	 * If we don't want to free the old_rd yet then
6381 * set old_rd to NULL to skip the freeing later
6382 * in this function:
6383 */
6384 if (!atomic_dec_and_test(&old_rd->refcount))
6385 old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006386 }
6387
6388 atomic_inc(&rd->refcount);
6389 rq->rd = rd;
6390
Rusty Russellc6c49272008-11-25 02:35:05 +10306391 cpumask_set_cpu(rq->cpu, rd->span);
Gregory Haskins00aec932009-07-30 10:57:23 -04006392 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006393 set_rq_online(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006394
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006395 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnara0490fa2009-02-12 11:35:40 +01006396
6397 if (old_rd)
6398 free_rootdomain(old_rd);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006399}
6400
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006401static int init_rootdomain(struct root_domain *rd)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006402{
6403 memset(rd, 0, sizeof(*rd));
6404
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006405 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
Li Zefan0c910d22009-01-06 17:39:06 +08006406 goto out;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006407 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306408 goto free_span;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006409 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306410 goto free_online;
Gregory Haskins6e0534f2008-05-12 21:21:01 +02006411
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006412 if (cpupri_init(&rd->cpupri) != 0)
Rusty Russell68e74562008-11-25 02:35:13 +10306413 goto free_rto_mask;
Rusty Russellc6c49272008-11-25 02:35:05 +10306414 return 0;
6415
Rusty Russell68e74562008-11-25 02:35:13 +10306416free_rto_mask:
6417 free_cpumask_var(rd->rto_mask);
Rusty Russellc6c49272008-11-25 02:35:05 +10306418free_online:
6419 free_cpumask_var(rd->online);
6420free_span:
6421 free_cpumask_var(rd->span);
Li Zefan0c910d22009-01-06 17:39:06 +08006422out:
Rusty Russellc6c49272008-11-25 02:35:05 +10306423 return -ENOMEM;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006424}
6425
6426static void init_defrootdomain(void)
6427{
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006428 init_rootdomain(&def_root_domain);
Rusty Russellc6c49272008-11-25 02:35:05 +10306429
Gregory Haskins57d885f2008-01-25 21:08:18 +01006430 atomic_set(&def_root_domain.refcount, 1);
6431}
6432
Gregory Haskinsdc938522008-01-25 21:08:26 +01006433static struct root_domain *alloc_rootdomain(void)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006434{
6435 struct root_domain *rd;
6436
6437 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6438 if (!rd)
6439 return NULL;
6440
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006441 if (init_rootdomain(rd) != 0) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306442 kfree(rd);
6443 return NULL;
6444 }
Gregory Haskins57d885f2008-01-25 21:08:18 +01006445
6446 return rd;
6447}
6448
Linus Torvalds1da177e2005-04-16 15:20:36 -07006449/*
Ingo Molnar0eab9142008-01-25 21:08:19 +01006450 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
Linus Torvalds1da177e2005-04-16 15:20:36 -07006451 * hold the hotplug lock.
6452 */
Ingo Molnar0eab9142008-01-25 21:08:19 +01006453static void
6454cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006455{
Ingo Molnar70b97a72006-07-03 00:25:42 -07006456 struct rq *rq = cpu_rq(cpu);
Suresh Siddha245af2c2005-06-25 14:57:25 -07006457 struct sched_domain *tmp;
6458
Peter Zijlstra669c55e2010-04-16 14:59:29 +02006459 for (tmp = sd; tmp; tmp = tmp->parent)
6460 tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
6461
Suresh Siddha245af2c2005-06-25 14:57:25 -07006462 /* Remove the sched domains which do not contribute to scheduling. */
Li Zefanf29c9b12008-11-06 09:45:16 +08006463 for (tmp = sd; tmp; ) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006464 struct sched_domain *parent = tmp->parent;
6465 if (!parent)
6466 break;
Li Zefanf29c9b12008-11-06 09:45:16 +08006467
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006468 if (sd_parent_degenerate(tmp, parent)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006469 tmp->parent = parent->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006470 if (parent->parent)
6471 parent->parent->child = tmp;
Li Zefanf29c9b12008-11-06 09:45:16 +08006472 } else
6473 tmp = tmp->parent;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006474 }
6475
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006476 if (sd && sd_degenerate(sd)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006477 sd = sd->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006478 if (sd)
6479 sd->child = NULL;
6480 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006481
6482 sched_domain_debug(sd, cpu);
6483
Gregory Haskins57d885f2008-01-25 21:08:18 +01006484 rq_attach_root(rq, rd);
Nick Piggin674311d2005-06-25 14:57:27 -07006485 rcu_assign_pointer(rq->sd, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006486}
6487
6488/* cpus with isolated domains */
Rusty Russelldcc30a32008-11-25 02:35:12 +10306489static cpumask_var_t cpu_isolated_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006490
6491/* Setup the mask of cpus configured for isolated domains */
6492static int __init isolated_cpu_setup(char *str)
6493{
Rusty Russellbdddd292009-12-02 14:09:16 +10306494 alloc_bootmem_cpumask_var(&cpu_isolated_map);
Rusty Russell968ea6d2008-12-13 21:55:51 +10306495 cpulist_parse(str, cpu_isolated_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006496 return 1;
6497}
6498
Ingo Molnar8927f492007-10-15 17:00:13 +02006499__setup("isolcpus=", isolated_cpu_setup);
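/*
 * Example (hypothetical command line): booting with "isolcpus=2,3" (or
 * the equivalent range form "isolcpus=2-3") parses that list into
 * cpu_isolated_map, so cpus 2 and 3 are excluded from the domains built
 * below and the load balancer never moves work to or from them; tasks
 * have to be placed there explicitly, e.g. with sched_setaffinity().
 */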
Linus Torvalds1da177e2005-04-16 15:20:36 -07006500
6501/*
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006502 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
6503 * to a function which identifies what group (along with the sched group) a CPU
Rusty Russell96f874e2008-11-25 02:35:14 +10306504 * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids,
6505 * because we keep track of the groups covered with a struct cpumask.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006506 *
6507 * init_sched_build_groups will build a circular linked list of the groups
6508 * covered by the given span, set each group's ->cpumask correctly,
6509 * and initialize ->cpu_power to 0.
6510 */
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006511static void
Rusty Russell96f874e2008-11-25 02:35:14 +10306512init_sched_build_groups(const struct cpumask *span,
6513 const struct cpumask *cpu_map,
6514 int (*group_fn)(int cpu, const struct cpumask *cpu_map,
Mike Travis7c16ec52008-04-04 18:11:11 -07006515 struct sched_group **sg,
Rusty Russell96f874e2008-11-25 02:35:14 +10306516 struct cpumask *tmpmask),
6517 struct cpumask *covered, struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006518{
6519 struct sched_group *first = NULL, *last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006520 int i;
6521
Rusty Russell96f874e2008-11-25 02:35:14 +10306522 cpumask_clear(covered);
Mike Travis7c16ec52008-04-04 18:11:11 -07006523
Rusty Russellabcd0832008-11-25 02:35:02 +10306524 for_each_cpu(i, span) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006525 struct sched_group *sg;
Mike Travis7c16ec52008-04-04 18:11:11 -07006526 int group = group_fn(i, cpu_map, &sg, tmpmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006527 int j;
6528
Rusty Russell758b2cd2008-11-25 02:35:04 +10306529 if (cpumask_test_cpu(i, covered))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006530 continue;
6531
Rusty Russell758b2cd2008-11-25 02:35:04 +10306532 cpumask_clear(sched_group_cpus(sg));
Peter Zijlstra18a38852009-09-01 10:34:39 +02006533 sg->cpu_power = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006534
Rusty Russellabcd0832008-11-25 02:35:02 +10306535 for_each_cpu(j, span) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006536 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006537 continue;
6538
Rusty Russell96f874e2008-11-25 02:35:14 +10306539 cpumask_set_cpu(j, covered);
Rusty Russell758b2cd2008-11-25 02:35:04 +10306540 cpumask_set_cpu(j, sched_group_cpus(sg));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006541 }
6542 if (!first)
6543 first = sg;
6544 if (last)
6545 last->next = sg;
6546 last = sg;
6547 }
6548 last->next = first;
6549}
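/*
 * Since the list built above is circular, the idiomatic way to visit
 * every group exactly once is the do/while loop used elsewhere in this
 * file (sketch):
 *
 *	struct sched_group *sg = sd->groups;
 *	do {
 *		... inspect or update sg ...
 *		sg = sg->next;
 *	} while (sg != sd->groups);
 */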
6550
John Hawkes9c1cfda2005-09-06 15:18:14 -07006551#define SD_NODES_PER_DOMAIN 16
Linus Torvalds1da177e2005-04-16 15:20:36 -07006552
John Hawkes9c1cfda2005-09-06 15:18:14 -07006553#ifdef CONFIG_NUMA
akpm@osdl.org198e2f12006-01-12 01:05:30 -08006554
John Hawkes9c1cfda2005-09-06 15:18:14 -07006555/**
6556 * find_next_best_node - find the next node to include in a sched_domain
6557 * @node: node whose sched_domain we're building
6558 * @used_nodes: nodes already in the sched_domain
6559 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006560 * Find the next node to include in a given scheduling domain. Simply
John Hawkes9c1cfda2005-09-06 15:18:14 -07006561 * finds the closest node not already in the @used_nodes map.
6564 */
Mike Travisc5f59f02008-04-04 18:11:10 -07006565static int find_next_best_node(int node, nodemask_t *used_nodes)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006566{
6567 int i, n, val, min_val, best_node = 0;
6568
6569 min_val = INT_MAX;
6570
Mike Travis076ac2a2008-05-12 21:21:12 +02006571 for (i = 0; i < nr_node_ids; i++) {
John Hawkes9c1cfda2005-09-06 15:18:14 -07006572 /* Start at @node */
Mike Travis076ac2a2008-05-12 21:21:12 +02006573 n = (node + i) % nr_node_ids;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006574
6575 if (!nr_cpus_node(n))
6576 continue;
6577
6578 /* Skip already used nodes */
Mike Travisc5f59f02008-04-04 18:11:10 -07006579 if (node_isset(n, *used_nodes))
John Hawkes9c1cfda2005-09-06 15:18:14 -07006580 continue;
6581
6582 /* Simple min distance search */
6583 val = node_distance(node, n);
6584
6585 if (val < min_val) {
6586 min_val = val;
6587 best_node = n;
6588 }
6589 }
6590
Mike Travisc5f59f02008-04-04 18:11:10 -07006591 node_set(best_node, *used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006592 return best_node;
6593}
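/*
 * Worked example with made-up distances: starting from node 0 (already
 * marked in *used_nodes by the caller) with node_distance(0, {1, 2, 3})
 * = {20, 10, 30}, the scan above returns node 2 first (distance 10) and
 * marks it used; the next call returns node 1, then node 3.
 */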
6594
6595/**
6596 * sched_domain_node_span - get a cpumask for a node's sched_domain
6597 * @node: node whose cpumask we're constructing
Randy Dunlap73486722008-04-22 10:07:22 -07006598 * @span: resulting cpumask
John Hawkes9c1cfda2005-09-06 15:18:14 -07006599 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006600 * Given a node, construct a good cpumask for its sched_domain to span. It
John Hawkes9c1cfda2005-09-06 15:18:14 -07006601 * should be one that prevents unnecessary balancing, but also spreads tasks
6602 * out optimally.
6603 */
Rusty Russell96f874e2008-11-25 02:35:14 +10306604static void sched_domain_node_span(int node, struct cpumask *span)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006605{
Mike Travisc5f59f02008-04-04 18:11:10 -07006606 nodemask_t used_nodes;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006607 int i;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006608
Mike Travis6ca09df2008-12-31 18:08:45 -08006609 cpumask_clear(span);
Mike Travisc5f59f02008-04-04 18:11:10 -07006610 nodes_clear(used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006611
Mike Travis6ca09df2008-12-31 18:08:45 -08006612 cpumask_or(span, span, cpumask_of_node(node));
Mike Travisc5f59f02008-04-04 18:11:10 -07006613 node_set(node, used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006614
6615 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
Mike Travisc5f59f02008-04-04 18:11:10 -07006616 int next_node = find_next_best_node(node, &used_nodes);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006617
Mike Travis6ca09df2008-12-31 18:08:45 -08006618 cpumask_or(span, span, cpumask_of_node(next_node));
John Hawkes9c1cfda2005-09-06 15:18:14 -07006619 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006620}
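/*
 * With SD_NODES_PER_DOMAIN = 16 the loop above adds @node plus its 15
 * nearest neighbours, so e.g. on a hypothetical 64-node machine a node's
 * sched_domain spans 16 nodes; on a 4-node box it simply covers all of
 * them.
 */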
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006621#endif /* CONFIG_NUMA */
John Hawkes9c1cfda2005-09-06 15:18:14 -07006622
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006623int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006624
John Hawkes9c1cfda2005-09-06 15:18:14 -07006625/*
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306626 * The cpus mask in sched_group and sched_domain hangs off the end.
Ingo Molnar4200efd2009-05-19 09:22:19 +02006627 *
6628 * ( See the comments in include/linux/sched.h:struct sched_group
6629 * and struct sched_domain. )
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306630 */
6631struct static_sched_group {
6632 struct sched_group sg;
6633 DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
6634};
6635
6636struct static_sched_domain {
6637 struct sched_domain sd;
6638 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
6639};
6640
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006641struct s_data {
6642#ifdef CONFIG_NUMA
6643 int sd_allnodes;
6644 cpumask_var_t domainspan;
6645 cpumask_var_t covered;
6646 cpumask_var_t notcovered;
6647#endif
6648 cpumask_var_t nodemask;
6649 cpumask_var_t this_sibling_map;
6650 cpumask_var_t this_core_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02006651 cpumask_var_t this_book_map;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006652 cpumask_var_t send_covered;
6653 cpumask_var_t tmpmask;
6654 struct sched_group **sched_group_nodes;
6655 struct root_domain *rd;
6656};
6657
Andreas Herrmann2109b992009-08-18 12:53:00 +02006658enum s_alloc {
6659 sa_sched_groups = 0,
6660 sa_rootdomain,
6661 sa_tmpmask,
6662 sa_send_covered,
Heiko Carstens01a08542010-08-31 10:28:16 +02006663 sa_this_book_map,
Andreas Herrmann2109b992009-08-18 12:53:00 +02006664 sa_this_core_map,
6665 sa_this_sibling_map,
6666 sa_nodemask,
6667 sa_sched_group_nodes,
6668#ifdef CONFIG_NUMA
6669 sa_notcovered,
6670 sa_covered,
6671 sa_domainspan,
6672#endif
6673 sa_none,
6674};
6675
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306676/*
Ingo Molnar48f24c42006-07-03 00:25:40 -07006677 * SMT sched-domains:
John Hawkes9c1cfda2005-09-06 15:18:14 -07006678 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006679#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306680static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
Tejun Heo1871e522009-10-29 22:34:13 +09006681static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006682
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006683static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306684cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
6685 struct sched_group **sg, struct cpumask *unused)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006686{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006687 if (sg)
Tejun Heo1871e522009-10-29 22:34:13 +09006688 *sg = &per_cpu(sched_groups, cpu).sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006689 return cpu;
6690}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006691#endif /* CONFIG_SCHED_SMT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006692
Ingo Molnar48f24c42006-07-03 00:25:40 -07006693/*
6694 * multi-core sched-domains:
6695 */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006696#ifdef CONFIG_SCHED_MC
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306697static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
6698static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006699
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006700static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306701cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
6702 struct sched_group **sg, struct cpumask *mask)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006703{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006704 int group;
Heiko Carstensf2698932010-08-31 10:28:15 +02006705#ifdef CONFIG_SCHED_SMT
Rusty Russellc69fc562009-03-13 14:49:46 +10306706 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306707 group = cpumask_first(mask);
Heiko Carstensf2698932010-08-31 10:28:15 +02006708#else
6709 group = cpu;
6710#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006711 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306712 *sg = &per_cpu(sched_group_core, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006713 return group;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006714}
Heiko Carstensf2698932010-08-31 10:28:15 +02006715#endif /* CONFIG_SCHED_MC */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006716
Heiko Carstens01a08542010-08-31 10:28:16 +02006717/*
6718 * book sched-domains:
6719 */
6720#ifdef CONFIG_SCHED_BOOK
6721static DEFINE_PER_CPU(struct static_sched_domain, book_domains);
6722static DEFINE_PER_CPU(struct static_sched_group, sched_group_book);
6723
Linus Torvalds1da177e2005-04-16 15:20:36 -07006724static int
Heiko Carstens01a08542010-08-31 10:28:16 +02006725cpu_to_book_group(int cpu, const struct cpumask *cpu_map,
6726 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006727{
Heiko Carstens01a08542010-08-31 10:28:16 +02006728 int group = cpu;
6729#ifdef CONFIG_SCHED_MC
6730 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
6731 group = cpumask_first(mask);
6732#elif defined(CONFIG_SCHED_SMT)
6733 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
6734 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006735#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02006736 if (sg)
6737 *sg = &per_cpu(sched_group_book, group).sg;
6738 return group;
6739}
6740#endif /* CONFIG_SCHED_BOOK */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006741
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306742static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
6743static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006744
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006745static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306746cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
6747 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006748{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006749 int group;
Heiko Carstens01a08542010-08-31 10:28:16 +02006750#ifdef CONFIG_SCHED_BOOK
6751 cpumask_and(mask, cpu_book_mask(cpu), cpu_map);
6752 group = cpumask_first(mask);
6753#elif defined(CONFIG_SCHED_MC)
Mike Travis6ca09df2008-12-31 18:08:45 -08006754 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306755 group = cpumask_first(mask);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006756#elif defined(CONFIG_SCHED_SMT)
Rusty Russellc69fc562009-03-13 14:49:46 +10306757 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306758 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006759#else
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006760 group = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006761#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006762 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306763 *sg = &per_cpu(sched_group_phys, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006764 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006765}
6766
6767#ifdef CONFIG_NUMA
John Hawkes9c1cfda2005-09-06 15:18:14 -07006768/*
6769 * init_sched_build_groups() can't handle what we want to do with node
6770 * groups, so roll our own. Now each node has its own list of groups which
6771 * gets dynamically allocated.
6772 */
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006773static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
Mike Travis434d53b2008-04-04 18:11:04 -07006774static struct sched_group ***sched_group_nodes_bycpu;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006775
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006776static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306777static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006778
Rusty Russell96f874e2008-11-25 02:35:14 +10306779static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
6780 struct sched_group **sg,
6781 struct cpumask *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006782{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006783 int group;
6784
Mike Travis6ca09df2008-12-31 18:08:45 -08006785 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306786 group = cpumask_first(nodemask);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006787
6788 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306789 *sg = &per_cpu(sched_group_allnodes, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006790 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006791}
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006792
Siddha, Suresh B08069032006-03-27 01:15:23 -08006793static void init_numa_sched_groups_power(struct sched_group *group_head)
6794{
6795 struct sched_group *sg = group_head;
6796 int j;
6797
6798 if (!sg)
6799 return;
Andi Kleen3a5c3592007-10-15 17:00:14 +02006800 do {
Rusty Russell758b2cd2008-11-25 02:35:04 +10306801 for_each_cpu(j, sched_group_cpus(sg)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02006802 struct sched_domain *sd;
Siddha, Suresh B08069032006-03-27 01:15:23 -08006803
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306804 sd = &per_cpu(phys_domains, j).sd;
Miao Xie13318a72009-04-15 09:59:10 +08006805 if (j != group_first_cpu(sd->groups)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02006806 /*
6807 * Only add "power" once for each
6808 * physical package.
6809 */
6810 continue;
6811 }
6812
Peter Zijlstra18a38852009-09-01 10:34:39 +02006813 sg->cpu_power += sd->groups->cpu_power;
Siddha, Suresh B08069032006-03-27 01:15:23 -08006814 }
Andi Kleen3a5c3592007-10-15 17:00:14 +02006815 sg = sg->next;
6816 } while (sg != group_head);
Siddha, Suresh B08069032006-03-27 01:15:23 -08006817}
Andreas Herrmann0601a882009-08-18 13:01:11 +02006818
6819static int build_numa_sched_groups(struct s_data *d,
6820 const struct cpumask *cpu_map, int num)
6821{
6822 struct sched_domain *sd;
6823 struct sched_group *sg, *prev;
6824 int n, j;
6825
6826 cpumask_clear(d->covered);
6827 cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
6828 if (cpumask_empty(d->nodemask)) {
6829 d->sched_group_nodes[num] = NULL;
6830 goto out;
6831 }
6832
6833 sched_domain_node_span(num, d->domainspan);
6834 cpumask_and(d->domainspan, d->domainspan, cpu_map);
6835
6836 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
6837 GFP_KERNEL, num);
6838 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006839		printk(KERN_WARNING "Cannot alloc domain group for node %d\n",
6840 num);
Andreas Herrmann0601a882009-08-18 13:01:11 +02006841 return -ENOMEM;
6842 }
6843 d->sched_group_nodes[num] = sg;
6844
6845 for_each_cpu(j, d->nodemask) {
6846 sd = &per_cpu(node_domains, j).sd;
6847 sd->groups = sg;
6848 }
6849
Peter Zijlstra18a38852009-09-01 10:34:39 +02006850 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02006851 cpumask_copy(sched_group_cpus(sg), d->nodemask);
6852 sg->next = sg;
6853 cpumask_or(d->covered, d->covered, d->nodemask);
6854
6855 prev = sg;
6856 for (j = 0; j < nr_node_ids; j++) {
6857 n = (num + j) % nr_node_ids;
6858 cpumask_complement(d->notcovered, d->covered);
6859 cpumask_and(d->tmpmask, d->notcovered, cpu_map);
6860 cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
6861 if (cpumask_empty(d->tmpmask))
6862 break;
6863 cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
6864 if (cpumask_empty(d->tmpmask))
6865 continue;
6866 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
6867 GFP_KERNEL, num);
6868 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006869 printk(KERN_WARNING
6870				"Cannot alloc domain group for node %d\n", j);
Andreas Herrmann0601a882009-08-18 13:01:11 +02006871 return -ENOMEM;
6872 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02006873 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02006874 cpumask_copy(sched_group_cpus(sg), d->tmpmask);
6875 sg->next = prev->next;
6876 cpumask_or(d->covered, d->covered, d->tmpmask);
6877 prev->next = sg;
6878 prev = sg;
6879 }
6880out:
6881 return 0;
6882}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006883#endif /* CONFIG_NUMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006884
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006885#ifdef CONFIG_NUMA
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006886/* Free memory allocated for various sched_group structures */
Rusty Russell96f874e2008-11-25 02:35:14 +10306887static void free_sched_groups(const struct cpumask *cpu_map,
6888 struct cpumask *nodemask)
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006889{
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006890 int cpu, i;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006891
Rusty Russellabcd0832008-11-25 02:35:02 +10306892 for_each_cpu(cpu, cpu_map) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006893 struct sched_group **sched_group_nodes
6894 = sched_group_nodes_bycpu[cpu];
6895
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006896 if (!sched_group_nodes)
6897 continue;
6898
Mike Travis076ac2a2008-05-12 21:21:12 +02006899 for (i = 0; i < nr_node_ids; i++) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006900 struct sched_group *oldsg, *sg = sched_group_nodes[i];
6901
Mike Travis6ca09df2008-12-31 18:08:45 -08006902 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306903 if (cpumask_empty(nodemask))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006904 continue;
6905
6906 if (sg == NULL)
6907 continue;
6908 sg = sg->next;
6909next_sg:
6910 oldsg = sg;
6911 sg = sg->next;
6912 kfree(oldsg);
6913 if (oldsg != sched_group_nodes[i])
6914 goto next_sg;
6915 }
6916 kfree(sched_group_nodes);
6917 sched_group_nodes_bycpu[cpu] = NULL;
6918 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006919}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006920#else /* !CONFIG_NUMA */
Rusty Russell96f874e2008-11-25 02:35:14 +10306921static void free_sched_groups(const struct cpumask *cpu_map,
6922 struct cpumask *nodemask)
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006923{
6924}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006925#endif /* CONFIG_NUMA */
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006926
Linus Torvalds1da177e2005-04-16 15:20:36 -07006927/*
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006928 * Initialize sched groups cpu_power.
6929 *
6930 * cpu_power indicates the capacity of a sched group, which is used while
6931 * distributing the load between different sched groups in a sched domain.
6932 * Typically cpu_power is the same for all the groups in a sched domain unless
6933 * there are asymmetries in the topology. If there are asymmetries, a group
6934 * with more cpu_power will pick up more load than a group with
6935 * less cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006936 */
6937static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6938{
6939 struct sched_domain *child;
6940 struct sched_group *group;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02006941 long power;
6942 int weight;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006943
6944 WARN_ON(!sd || !sd->groups);
6945
Miao Xie13318a72009-04-15 09:59:10 +08006946 if (cpu != group_first_cpu(sd->groups))
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006947 return;
6948
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07006949 sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
6950
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006951 child = sd->child;
6952
Peter Zijlstra18a38852009-09-01 10:34:39 +02006953 sd->groups->cpu_power = 0;
Eric Dumazet5517d862007-05-08 00:32:57 -07006954
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02006955 if (!child) {
6956 power = SCHED_LOAD_SCALE;
6957 weight = cpumask_weight(sched_domain_span(sd));
6958 /*
6959 * SMT siblings share the power of a single core.
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02006960 * Usually multiple threads get a better yield out of
6961			 * that one core than a single thread would have;
6962 * reflect that in sd->smt_gain.
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02006963 */
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02006964 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
6965 power *= sd->smt_gain;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02006966 power /= weight;
Peter Zijlstraa52bfd72009-09-01 10:34:35 +02006967 power >>= SCHED_LOAD_SHIFT;
6968 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02006969 sd->groups->cpu_power += power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006970 return;
6971 }
6972
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006973 /*
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02006974	 * Add the cpu_power of each child group to this group's cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006975 */
6976 group = child->groups;
6977 do {
Peter Zijlstra18a38852009-09-01 10:34:39 +02006978 sd->groups->cpu_power += group->cpu_power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006979 group = group->next;
6980 } while (group != child->groups);
6981}
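/*
 * Worked example of the leaf (!child) case above, assuming the common
 * defaults SCHED_LOAD_SCALE = 1024, SCHED_LOAD_SHIFT = 10 and
 * sd->smt_gain = 1178 (roughly 1.15 * 1024): for an SD_SHARE_CPUPOWER
 * domain spanning two siblings,
 *
 *	power = (1024 * 1178 / 2) >> 10 = 589
 *
 * so each sibling group advertises cpu_power 589, and together the two
 * contribute 1178 to the parent group, slightly more than one
 * independent core (1024), reflecting the typical SMT yield.
 */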
6982
6983/*
Mike Travis7c16ec52008-04-04 18:11:11 -07006984 * Initializers for sched domains
6985 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6986 */
6987
Ingo Molnara5d8c342008-10-09 11:35:51 +02006988#ifdef CONFIG_SCHED_DEBUG
6989# define SD_INIT_NAME(sd, type) sd->name = #type
6990#else
6991# define SD_INIT_NAME(sd, type) do { } while (0)
6992#endif
6993
Mike Travis7c16ec52008-04-04 18:11:11 -07006994#define SD_INIT(sd, type) sd_init_##type(sd)
Ingo Molnara5d8c342008-10-09 11:35:51 +02006995
Mike Travis7c16ec52008-04-04 18:11:11 -07006996#define SD_INIT_FUNC(type) \
6997static noinline void sd_init_##type(struct sched_domain *sd) \
6998{ \
6999 memset(sd, 0, sizeof(*sd)); \
7000 *sd = SD_##type##_INIT; \
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007001 sd->level = SD_LV_##type; \
Ingo Molnara5d8c342008-10-09 11:35:51 +02007002 SD_INIT_NAME(sd, type); \
Mike Travis7c16ec52008-04-04 18:11:11 -07007003}
7004
7005SD_INIT_FUNC(CPU)
7006#ifdef CONFIG_NUMA
7007 SD_INIT_FUNC(ALLNODES)
7008 SD_INIT_FUNC(NODE)
7009#endif
7010#ifdef CONFIG_SCHED_SMT
7011 SD_INIT_FUNC(SIBLING)
7012#endif
7013#ifdef CONFIG_SCHED_MC
7014 SD_INIT_FUNC(MC)
7015#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007016#ifdef CONFIG_SCHED_BOOK
7017 SD_INIT_FUNC(BOOK)
7018#endif
Mike Travis7c16ec52008-04-04 18:11:11 -07007019
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007020static int default_relax_domain_level = -1;
7021
7022static int __init setup_relax_domain_level(char *str)
7023{
Li Zefan30e0e172008-05-13 10:27:17 +08007024 unsigned long val;
7025
7026 val = simple_strtoul(str, NULL, 0);
7027 if (val < SD_LV_MAX)
7028 default_relax_domain_level = val;
7029
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007030 return 1;
7031}
7032__setup("relax_domain_level=", setup_relax_domain_level);
7033
7034static void set_domain_attribute(struct sched_domain *sd,
7035 struct sched_domain_attr *attr)
7036{
7037 int request;
7038
7039 if (!attr || attr->relax_domain_level < 0) {
7040 if (default_relax_domain_level < 0)
7041 return;
7042 else
7043 request = default_relax_domain_level;
7044 } else
7045 request = attr->relax_domain_level;
7046 if (request < sd->level) {
7047 /* turn off idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007048 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007049 } else {
7050 /* turn on idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007051 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007052 }
7053}
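/*
 * Example (hypothetical command line): booting with
 * "relax_domain_level=2" requests SD_LV_MC (assuming the enum order in
 * include/linux/sched.h, where MC is level 2).  For the SIBLING and MC
 * levels request < sd->level is false, so they keep
 * SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE; the wider BOOK, CPU, NODE and
 * ALLNODES levels have those flags cleared, confining newly-idle and
 * wakeup balancing to a physical package.
 */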
7054
Andreas Herrmann2109b992009-08-18 12:53:00 +02007055static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
7056 const struct cpumask *cpu_map)
7057{
7058 switch (what) {
7059 case sa_sched_groups:
7060 free_sched_groups(cpu_map, d->tmpmask); /* fall through */
7061 d->sched_group_nodes = NULL;
7062 case sa_rootdomain:
7063 free_rootdomain(d->rd); /* fall through */
7064 case sa_tmpmask:
7065 free_cpumask_var(d->tmpmask); /* fall through */
7066 case sa_send_covered:
7067 free_cpumask_var(d->send_covered); /* fall through */
Heiko Carstens01a08542010-08-31 10:28:16 +02007068 case sa_this_book_map:
7069 free_cpumask_var(d->this_book_map); /* fall through */
Andreas Herrmann2109b992009-08-18 12:53:00 +02007070 case sa_this_core_map:
7071 free_cpumask_var(d->this_core_map); /* fall through */
7072 case sa_this_sibling_map:
7073 free_cpumask_var(d->this_sibling_map); /* fall through */
7074 case sa_nodemask:
7075 free_cpumask_var(d->nodemask); /* fall through */
7076 case sa_sched_group_nodes:
7077#ifdef CONFIG_NUMA
7078 kfree(d->sched_group_nodes); /* fall through */
7079 case sa_notcovered:
7080 free_cpumask_var(d->notcovered); /* fall through */
7081 case sa_covered:
7082 free_cpumask_var(d->covered); /* fall through */
7083 case sa_domainspan:
7084 free_cpumask_var(d->domainspan); /* fall through */
7085#endif
7086 case sa_none:
7087 break;
7088 }
7089}
7090
7091static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
7092 const struct cpumask *cpu_map)
7093{
7094#ifdef CONFIG_NUMA
7095 if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
7096 return sa_none;
7097 if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
7098 return sa_domainspan;
7099 if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
7100 return sa_covered;
7101 /* Allocate the per-node list of sched groups */
7102 d->sched_group_nodes = kcalloc(nr_node_ids,
7103 sizeof(struct sched_group *), GFP_KERNEL);
7104 if (!d->sched_group_nodes) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007105		printk(KERN_WARNING "Cannot alloc sched group node list\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02007106 return sa_notcovered;
7107 }
7108 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
7109#endif
7110 if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
7111 return sa_sched_group_nodes;
7112 if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
7113 return sa_nodemask;
7114 if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
7115 return sa_this_sibling_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02007116 if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL))
Andreas Herrmann2109b992009-08-18 12:53:00 +02007117 return sa_this_core_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02007118 if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
7119 return sa_this_book_map;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007120 if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
7121 return sa_send_covered;
7122 d->rd = alloc_rootdomain();
7123 if (!d->rd) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007124 printk(KERN_WARNING "Cannot alloc root domain\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02007125 return sa_tmpmask;
7126 }
7127 return sa_rootdomain;
7128}
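/*
 * The pair above implements staged allocation with a single unwinder:
 * on failure the allocator returns the enum value naming the deepest
 * stage that succeeded, and the fall-through switch in
 * __free_domain_allocs() releases that stage and everything below it.
 * __build_sched_domains() below uses it exactly this way (sketch):
 *
 *	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
 *	if (alloc_state != sa_rootdomain)
 *		goto error;
 *	...
 *	error:
 *		__free_domain_allocs(&d, alloc_state, cpu_map);
 */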
7129
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02007130static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
7131 const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
7132{
7133 struct sched_domain *sd = NULL;
7134#ifdef CONFIG_NUMA
7135 struct sched_domain *parent;
7136
7137 d->sd_allnodes = 0;
7138 if (cpumask_weight(cpu_map) >
7139 SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
7140 sd = &per_cpu(allnodes_domains, i).sd;
7141 SD_INIT(sd, ALLNODES);
7142 set_domain_attribute(sd, attr);
7143 cpumask_copy(sched_domain_span(sd), cpu_map);
7144 cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
7145 d->sd_allnodes = 1;
7146 }
7147 parent = sd;
7148
7149 sd = &per_cpu(node_domains, i).sd;
7150 SD_INIT(sd, NODE);
7151 set_domain_attribute(sd, attr);
7152 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
7153 sd->parent = parent;
7154 if (parent)
7155 parent->child = sd;
7156 cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
7157#endif
7158 return sd;
7159}
7160
Andreas Herrmann87cce662009-08-18 12:54:55 +02007161static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
7162 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7163 struct sched_domain *parent, int i)
7164{
7165 struct sched_domain *sd;
7166 sd = &per_cpu(phys_domains, i).sd;
7167 SD_INIT(sd, CPU);
7168 set_domain_attribute(sd, attr);
7169 cpumask_copy(sched_domain_span(sd), d->nodemask);
7170 sd->parent = parent;
7171 if (parent)
7172 parent->child = sd;
7173 cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
7174 return sd;
7175}
7176
Heiko Carstens01a08542010-08-31 10:28:16 +02007177static struct sched_domain *__build_book_sched_domain(struct s_data *d,
7178 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7179 struct sched_domain *parent, int i)
7180{
7181 struct sched_domain *sd = parent;
7182#ifdef CONFIG_SCHED_BOOK
7183 sd = &per_cpu(book_domains, i).sd;
7184 SD_INIT(sd, BOOK);
7185 set_domain_attribute(sd, attr);
7186 cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
7187 sd->parent = parent;
7188 parent->child = sd;
7189 cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask);
7190#endif
7191 return sd;
7192}
7193
Andreas Herrmann410c4082009-08-18 12:56:14 +02007194static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
7195 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7196 struct sched_domain *parent, int i)
7197{
7198 struct sched_domain *sd = parent;
7199#ifdef CONFIG_SCHED_MC
7200 sd = &per_cpu(core_domains, i).sd;
7201 SD_INIT(sd, MC);
7202 set_domain_attribute(sd, attr);
7203 cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
7204 sd->parent = parent;
7205 parent->child = sd;
7206 cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
7207#endif
7208 return sd;
7209}
7210
Andreas Herrmannd8173532009-08-18 12:57:03 +02007211static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
7212 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7213 struct sched_domain *parent, int i)
7214{
7215 struct sched_domain *sd = parent;
7216#ifdef CONFIG_SCHED_SMT
7217 sd = &per_cpu(cpu_domains, i).sd;
7218 SD_INIT(sd, SIBLING);
7219 set_domain_attribute(sd, attr);
7220 cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
7221 sd->parent = parent;
7222 parent->child = sd;
7223 cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
7224#endif
7225 return sd;
7226}
7227
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007228static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
7229 const struct cpumask *cpu_map, int cpu)
7230{
7231 switch (l) {
7232#ifdef CONFIG_SCHED_SMT
7233 case SD_LV_SIBLING: /* set up CPU (sibling) groups */
7234 cpumask_and(d->this_sibling_map, cpu_map,
7235 topology_thread_cpumask(cpu));
7236 if (cpu == cpumask_first(d->this_sibling_map))
7237 init_sched_build_groups(d->this_sibling_map, cpu_map,
7238 &cpu_to_cpu_group,
7239 d->send_covered, d->tmpmask);
7240 break;
7241#endif
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007242#ifdef CONFIG_SCHED_MC
7243 case SD_LV_MC: /* set up multi-core groups */
7244 cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
7245 if (cpu == cpumask_first(d->this_core_map))
7246 init_sched_build_groups(d->this_core_map, cpu_map,
7247 &cpu_to_core_group,
7248 d->send_covered, d->tmpmask);
7249 break;
7250#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007251#ifdef CONFIG_SCHED_BOOK
7252 case SD_LV_BOOK: /* set up book groups */
7253 cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu));
7254 if (cpu == cpumask_first(d->this_book_map))
7255 init_sched_build_groups(d->this_book_map, cpu_map,
7256 &cpu_to_book_group,
7257 d->send_covered, d->tmpmask);
7258 break;
7259#endif
Andreas Herrmann86548092009-08-18 12:59:28 +02007260 case SD_LV_CPU: /* set up physical groups */
7261 cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
7262 if (!cpumask_empty(d->nodemask))
7263 init_sched_build_groups(d->nodemask, cpu_map,
7264 &cpu_to_phys_group,
7265 d->send_covered, d->tmpmask);
7266 break;
Andreas Herrmannde616e32009-08-18 13:00:13 +02007267#ifdef CONFIG_NUMA
7268 case SD_LV_ALLNODES:
7269 init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
7270 d->send_covered, d->tmpmask);
7271 break;
7272#endif
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007273 default:
7274 break;
7275 }
7276}
7277
Mike Travis7c16ec52008-04-04 18:11:11 -07007278/*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007279 * Build sched domains for a given set of cpus and attach the sched domains
7280 * to the individual cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07007281 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307282static int __build_sched_domains(const struct cpumask *cpu_map,
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007283 struct sched_domain_attr *attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007284{
Andreas Herrmann2109b992009-08-18 12:53:00 +02007285 enum s_alloc alloc_state = sa_none;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007286 struct s_data d;
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007287 struct sched_domain *sd;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007288 int i;
John Hawkesd1b55132005-09-06 15:18:14 -07007289#ifdef CONFIG_NUMA
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007290 d.sd_allnodes = 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307291#endif
7292
Andreas Herrmann2109b992009-08-18 12:53:00 +02007293 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7294 if (alloc_state != sa_rootdomain)
7295 goto error;
7296 alloc_state = sa_sched_groups;
Mike Travis7c16ec52008-04-04 18:11:11 -07007297
Linus Torvalds1da177e2005-04-16 15:20:36 -07007298 /*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007299 * Set up domains for cpus specified by the cpu_map.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007300 */
Rusty Russellabcd0832008-11-25 02:35:02 +10307301 for_each_cpu(i, cpu_map) {
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007302 cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
7303 cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007304
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02007305 sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
Andreas Herrmann87cce662009-08-18 12:54:55 +02007306 sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
Heiko Carstens01a08542010-08-31 10:28:16 +02007307 sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmann410c4082009-08-18 12:56:14 +02007308 sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmannd8173532009-08-18 12:57:03 +02007309 sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007310 }
7311
Rusty Russellabcd0832008-11-25 02:35:02 +10307312 for_each_cpu(i, cpu_map) {
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007313 build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
Heiko Carstens01a08542010-08-31 10:28:16 +02007314 build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007315 build_sched_groups(&d, SD_LV_MC, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007316 }
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007317
Linus Torvalds1da177e2005-04-16 15:20:36 -07007318 /* Set up physical groups */
Andreas Herrmann86548092009-08-18 12:59:28 +02007319 for (i = 0; i < nr_node_ids; i++)
7320 build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007321
7322#ifdef CONFIG_NUMA
7323 /* Set up node groups */
Andreas Herrmannde616e32009-08-18 13:00:13 +02007324 if (d.sd_allnodes)
7325 build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007326
Andreas Herrmann0601a882009-08-18 13:01:11 +02007327 for (i = 0; i < nr_node_ids; i++)
7328 if (build_numa_sched_groups(&d, cpu_map, i))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007329 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007330#endif
7331
7332 /* Calculate CPU power for physical packages and nodes */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007333#ifdef CONFIG_SCHED_SMT
Rusty Russellabcd0832008-11-25 02:35:02 +10307334 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007335 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007336 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007337 }
7338#endif
7339#ifdef CONFIG_SCHED_MC
Rusty Russellabcd0832008-11-25 02:35:02 +10307340 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007341 sd = &per_cpu(core_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007342 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007343 }
7344#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007345#ifdef CONFIG_SCHED_BOOK
7346 for_each_cpu(i, cpu_map) {
7347 sd = &per_cpu(book_domains, i).sd;
7348 init_sched_groups_power(i, sd);
7349 }
7350#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07007351
Rusty Russellabcd0832008-11-25 02:35:02 +10307352 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007353 sd = &per_cpu(phys_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007354 init_sched_groups_power(i, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007355 }
7356
John Hawkes9c1cfda2005-09-06 15:18:14 -07007357#ifdef CONFIG_NUMA
Mike Travis076ac2a2008-05-12 21:21:12 +02007358 for (i = 0; i < nr_node_ids; i++)
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007359 init_numa_sched_groups_power(d.sched_group_nodes[i]);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007360
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007361 if (d.sd_allnodes) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007362 struct sched_group *sg;
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007363
Rusty Russell96f874e2008-11-25 02:35:14 +10307364 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007365 d.tmpmask);
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007366 init_numa_sched_groups_power(sg);
7367 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07007368#endif
7369
Linus Torvalds1da177e2005-04-16 15:20:36 -07007370 /* Attach the domains */
Rusty Russellabcd0832008-11-25 02:35:02 +10307371 for_each_cpu(i, cpu_map) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007372#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307373 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007374#elif defined(CONFIG_SCHED_MC)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307375 sd = &per_cpu(core_domains, i).sd;
Heiko Carstens01a08542010-08-31 10:28:16 +02007376#elif defined(CONFIG_SCHED_BOOK)
7377 sd = &per_cpu(book_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007378#else
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307379 sd = &per_cpu(phys_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007380#endif
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007381 cpu_attach_domain(sd, d.rd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007382 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007383
Andreas Herrmann2109b992009-08-18 12:53:00 +02007384 d.sched_group_nodes = NULL; /* don't free this we still need it */
7385 __free_domain_allocs(&d, sa_tmpmask, cpu_map);
7386 return 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307387
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007388error:
Andreas Herrmann2109b992009-08-18 12:53:00 +02007389 __free_domain_allocs(&d, alloc_state, cpu_map);
7390 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007391}
Paul Jackson029190c2007-10-18 23:40:20 -07007392
Rusty Russell96f874e2008-11-25 02:35:14 +10307393static int build_sched_domains(const struct cpumask *cpu_map)
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007394{
7395 return __build_sched_domains(cpu_map, NULL);
7396}
7397
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307398static cpumask_var_t *doms_cur; /* current sched domains */
Paul Jackson029190c2007-10-18 23:40:20 -07007399static int ndoms_cur; /* number of sched domains in 'doms_cur' */
Ingo Molnar4285f5942008-05-16 17:47:14 +02007400static struct sched_domain_attr *dattr_cur;
7401 /* attribues of custom domains in 'doms_cur' */
Paul Jackson029190c2007-10-18 23:40:20 -07007402
7403/*
7404 * Special case: If a kmalloc of a doms_cur partition (array of
Rusty Russell42128232008-11-25 02:35:12 +10307405 * cpumask) fails, then fall back to a single sched domain,
7406 * as determined by the single cpumask fallback_doms.
Paul Jackson029190c2007-10-18 23:40:20 -07007407 */
Rusty Russell42128232008-11-25 02:35:12 +10307408static cpumask_var_t fallback_doms;
Paul Jackson029190c2007-10-18 23:40:20 -07007409
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007410/*
7411 * arch_update_cpu_topology lets virtualized architectures update the
7412 * cpu core maps. It is supposed to return 1 if the topology changed
7413 * or 0 if it stayed the same.
7414 */
7415int __attribute__((weak)) arch_update_cpu_topology(void)
Heiko Carstens22e52b02008-03-12 18:31:59 +01007416{
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007417 return 0;
Heiko Carstens22e52b02008-03-12 18:31:59 +01007418}
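/*
 * Illustrative sketch (not part of this file): an architecture that can
 * detect topology changes, e.g. when running under a hypervisor, would
 * provide a non-weak definition of the hook above. "topology_changed()"
 * is a hypothetical helper here, not a real kernel symbol.
 */
#if 0
int arch_update_cpu_topology(void)
{
	return topology_changed() ? 1 : 0;
}
#endif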
7419
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307420cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7421{
7422 int i;
7423 cpumask_var_t *doms;
7424
7425 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7426 if (!doms)
7427 return NULL;
7428 for (i = 0; i < ndoms; i++) {
7429 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7430 free_sched_domains(doms, i);
7431 return NULL;
7432 }
7433 }
7434 return doms;
7435}
7436
7437void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7438{
7439 unsigned int i;
7440 for (i = 0; i < ndoms; i++)
7441 free_cpumask_var(doms[i]);
7442 kfree(doms);
7443}
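/*
 * Usage sketch (illustrative only): alloc_sched_domains() and
 * free_sched_domains() are meant to be used in pairs, except that
 * partition_sched_domains() below takes ownership of the array and frees
 * it itself. "mask_a" and "mask_b" are hypothetical disjoint cpumasks,
 * not symbols defined in this file.
 */
#if 0
static int example_two_partitions(const struct cpumask *mask_a,
				  const struct cpumask *mask_b)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms)
		return -ENOMEM;
	cpumask_copy(doms[0], mask_a);
	cpumask_copy(doms[1], mask_b);
	partition_sched_domains(2, doms, NULL);	/* takes ownership */
	return 0;
}
#endif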
7444
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007445/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007446 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
Paul Jackson029190c2007-10-18 23:40:20 -07007447 * For now this just excludes isolated cpus, but could be used to
7448 * exclude other special cases in the future.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007449 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307450static int arch_init_sched_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007451{
Milton Miller73785472007-10-24 18:23:48 +02007452 int err;
7453
Heiko Carstens22e52b02008-03-12 18:31:59 +01007454 arch_update_cpu_topology();
Paul Jackson029190c2007-10-18 23:40:20 -07007455 ndoms_cur = 1;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307456 doms_cur = alloc_sched_domains(ndoms_cur);
Paul Jackson029190c2007-10-18 23:40:20 -07007457 if (!doms_cur)
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307458 doms_cur = &fallback_doms;
7459 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007460 dattr_cur = NULL;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307461 err = build_sched_domains(doms_cur[0]);
Milton Miller6382bc92007-10-15 17:00:19 +02007462 register_sched_domain_sysctl();
Milton Miller73785472007-10-24 18:23:48 +02007463
7464 return err;
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007465}
7466
Rusty Russell96f874e2008-11-25 02:35:14 +10307467static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
7468 struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007469{
Mike Travis7c16ec52008-04-04 18:11:11 -07007470 free_sched_groups(cpu_map, tmpmask);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007471}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007472
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007473/*
7474 * Detach sched domains from a group of cpus specified in cpu_map
7475 * These cpus will now be attached to the NULL domain
7476 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307477static void detach_destroy_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007478{
Rusty Russell96f874e2008-11-25 02:35:14 +10307479	/* Safe to be static because the hotplug lock is held. */
7480 static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007481 int i;
7482
Rusty Russellabcd0832008-11-25 02:35:02 +10307483 for_each_cpu(i, cpu_map)
Gregory Haskins57d885f2008-01-25 21:08:18 +01007484 cpu_attach_domain(NULL, &def_root_domain, i);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007485 synchronize_sched();
Rusty Russell96f874e2008-11-25 02:35:14 +10307486 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007487}
7488
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007489/* handle NULL as "default" */
7490static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7491 struct sched_domain_attr *new, int idx_new)
7492{
7493 struct sched_domain_attr tmp;
7494
7495 /* fast path */
7496 if (!new && !cur)
7497 return 1;
7498
7499 tmp = SD_ATTR_INIT;
7500 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7501 new ? (new + idx_new) : &tmp,
7502 sizeof(struct sched_domain_attr));
7503}
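/*
 * For example, given the semantics above: dattrs_equal(NULL, i, NULL, j)
 * is always 1, and a NULL array is compared as if each slot held
 * SD_ATTR_INIT, so an explicit attribute equal to SD_ATTR_INIT also
 * matches against a NULL array.
 */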
7504
Paul Jackson029190c2007-10-18 23:40:20 -07007505/*
7506 * Partition sched domains as specified by the 'ndoms_new'
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007507 * cpumasks in the array doms_new[] of cpumasks. This compares
Paul Jackson029190c2007-10-18 23:40:20 -07007508 * doms_new[] to the current sched domain partitioning, doms_cur[].
7509 * It destroys each deleted domain and builds each new domain.
7510 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307511 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007512 * The masks must not intersect (i.e. must not overlap). We set up one
7513 * sched domain for each mask. CPUs not in any of the cpumasks will
7514 * not be load balanced. If the same cpumask appears both in the
Paul Jackson029190c2007-10-18 23:40:20 -07007515 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7516 * it as it is.
7517 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307518 * The passed in 'doms_new' should be allocated using
7519 * alloc_sched_domains. This routine takes ownership of it and will
7520 * free_sched_domains it when done with it. If the caller failed the
7521 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7522 * and partition_sched_domains() will fall back to the single partition
7523 * 'fallback_doms'; this also forces the domains to be rebuilt.
Paul Jackson029190c2007-10-18 23:40:20 -07007524 *
Rusty Russell96f874e2008-11-25 02:35:14 +10307525 * If doms_new == NULL it will be replaced with cpu_online_mask.
Li Zefan700018e2008-11-18 14:02:03 +08007526 * ndoms_new == 0 is a special case for destroying existing domains,
7527 * and it will not create the default domain.
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007528 *
Paul Jackson029190c2007-10-18 23:40:20 -07007529 * Call with hotplug lock held
7530 */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307531void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007532 struct sched_domain_attr *dattr_new)
Paul Jackson029190c2007-10-18 23:40:20 -07007533{
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007534 int i, j, n;
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007535 int new_topology;
Paul Jackson029190c2007-10-18 23:40:20 -07007536
Heiko Carstens712555e2008-04-28 11:33:07 +02007537 mutex_lock(&sched_domains_mutex);
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007538
Milton Miller73785472007-10-24 18:23:48 +02007539 /* always unregister in case we don't destroy any domains */
7540 unregister_sched_domain_sysctl();
7541
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007542 /* Let architecture update cpu core mappings. */
7543 new_topology = arch_update_cpu_topology();
7544
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007545 n = doms_new ? ndoms_new : 0;
Paul Jackson029190c2007-10-18 23:40:20 -07007546
7547 /* Destroy deleted domains */
7548 for (i = 0; i < ndoms_cur; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007549 for (j = 0; j < n && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307550 if (cpumask_equal(doms_cur[i], doms_new[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007551 && dattrs_equal(dattr_cur, i, dattr_new, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007552 goto match1;
7553 }
7554 /* no match - a current sched domain not in new doms_new[] */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307555 detach_destroy_domains(doms_cur[i]);
Paul Jackson029190c2007-10-18 23:40:20 -07007556match1:
7557 ;
7558 }
7559
Max Krasnyanskye761b772008-07-15 04:43:49 -07007560 if (doms_new == NULL) {
7561 ndoms_cur = 0;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307562 doms_new = &fallback_doms;
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007563 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
Li Zefanfaa2f982008-11-04 16:20:23 +08007564 WARN_ON_ONCE(dattr_new);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007565 }
7566
Paul Jackson029190c2007-10-18 23:40:20 -07007567 /* Build new domains */
7568 for (i = 0; i < ndoms_new; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007569 for (j = 0; j < ndoms_cur && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307570 if (cpumask_equal(doms_new[i], doms_cur[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007571 && dattrs_equal(dattr_new, i, dattr_cur, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007572 goto match2;
7573 }
7574 /* no match - add a new doms_new */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307575 __build_sched_domains(doms_new[i],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007576 dattr_new ? dattr_new + i : NULL);
Paul Jackson029190c2007-10-18 23:40:20 -07007577match2:
7578 ;
7579 }
7580
7581 /* Remember the new sched domains */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307582 if (doms_cur != &fallback_doms)
7583 free_sched_domains(doms_cur, ndoms_cur);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007584 kfree(dattr_cur); /* kfree(NULL) is safe */
Paul Jackson029190c2007-10-18 23:40:20 -07007585 doms_cur = doms_new;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007586 dattr_cur = dattr_new;
Paul Jackson029190c2007-10-18 23:40:20 -07007587 ndoms_cur = ndoms_new;
Milton Miller73785472007-10-24 18:23:48 +02007588
7589 register_sched_domain_sysctl();
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007590
Heiko Carstens712555e2008-04-28 11:33:07 +02007591 mutex_unlock(&sched_domains_mutex);
Paul Jackson029190c2007-10-18 23:40:20 -07007592}
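/*
 * Illustrative call patterns (sketch only) for the special cases
 * documented above; both calls assume the hotplug lock is held:
 */
#if 0
static void example_repartition(void)
{
	/* Destroy all current domains and do not create a default one: */
	partition_sched_domains(0, NULL, NULL);

	/* After a failed allocation, fall back to 'fallback_doms': */
	partition_sched_domains(1, NULL, NULL);
}
#endif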
7593
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007594#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
Li Zefanc70f22d2009-01-05 19:07:50 +08007595static void arch_reinit_sched_domains(void)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007596{
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007597 get_online_cpus();
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007598
7599 /* Destroy domains first to force the rebuild */
7600 partition_sched_domains(0, NULL, NULL);
7601
Max Krasnyanskye761b772008-07-15 04:43:49 -07007602 rebuild_sched_domains();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007603 put_online_cpus();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007604}
7605
7606static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7607{
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307608 unsigned int level = 0;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007609
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307610 if (sscanf(buf, "%u", &level) != 1)
7611 return -EINVAL;
7612
7613 /*
7614	 * level is always positive, so there is no need to check for
7615	 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
7616	 * What happens on a 0 or 1 byte write? Do we need to check
7617	 * count as well?
7618 */
7619
7620 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007621 return -EINVAL;
7622
7623 if (smt)
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307624 sched_smt_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007625 else
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307626 sched_mc_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007627
Li Zefanc70f22d2009-01-05 19:07:50 +08007628 arch_reinit_sched_domains();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007629
Li Zefanc70f22d2009-01-05 19:07:50 +08007630 return count;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007631}
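/*
 * Illustrative store semantics: a userspace write of "2\n" to the
 * sched_mc_power_savings sysfs attribute (defined below) reaches this
 * function with smt == 0, sscanf() parses level == 2, the bound check
 * against MAX_POWERSAVINGS_BALANCE_LEVELS passes, sched_mc_power_savings
 * is set to 2 and the domains are rebuilt via arch_reinit_sched_domains().
 */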
7632
Adrian Bunk6707de002007-08-12 18:08:19 +02007633#ifdef CONFIG_SCHED_MC
Andi Kleenf718cd42008-07-29 22:33:52 -07007634static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007635 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007636 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007637{
7638 return sprintf(page, "%u\n", sched_mc_power_savings);
7639}
Andi Kleenf718cd42008-07-29 22:33:52 -07007640static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007641 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007642 const char *buf, size_t count)
7643{
7644 return sched_power_savings_store(buf, count, 0);
7645}
Andi Kleenf718cd42008-07-29 22:33:52 -07007646static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7647 sched_mc_power_savings_show,
7648 sched_mc_power_savings_store);
Adrian Bunk6707de002007-08-12 18:08:19 +02007649#endif
7650
7651#ifdef CONFIG_SCHED_SMT
Andi Kleenf718cd42008-07-29 22:33:52 -07007652static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007653 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007654 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007655{
7656 return sprintf(page, "%u\n", sched_smt_power_savings);
7657}
Andi Kleenf718cd42008-07-29 22:33:52 -07007658static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007659 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007660 const char *buf, size_t count)
7661{
7662 return sched_power_savings_store(buf, count, 1);
7663}
Andi Kleenf718cd42008-07-29 22:33:52 -07007664static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7665 sched_smt_power_savings_show,
Adrian Bunk6707de002007-08-12 18:08:19 +02007666 sched_smt_power_savings_store);
7667#endif
7668
Li Zefan39aac642009-01-05 19:18:02 +08007669int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007670{
7671 int err = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007672
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007673#ifdef CONFIG_SCHED_SMT
7674 if (smt_capable())
7675 err = sysfs_create_file(&cls->kset.kobj,
7676 &attr_sched_smt_power_savings.attr);
7677#endif
7678#ifdef CONFIG_SCHED_MC
7679 if (!err && mc_capable())
7680 err = sysfs_create_file(&cls->kset.kobj,
7681 &attr_sched_mc_power_savings.attr);
7682#endif
7683 return err;
7684}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007685#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007686
Linus Torvalds1da177e2005-04-16 15:20:36 -07007687/*
Tejun Heo3a101d02010-06-08 21:40:36 +02007688 * Update cpusets according to cpu_active mask. If cpusets are
7689 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7690 * around partition_sched_domains().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007691 */
Tejun Heo0b2e9182010-06-21 23:53:31 +02007692static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7693 void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007694{
Tejun Heo3a101d02010-06-08 21:40:36 +02007695 switch (action & ~CPU_TASKS_FROZEN) {
Max Krasnyanskye761b772008-07-15 04:43:49 -07007696 case CPU_ONLINE:
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007697 case CPU_DOWN_FAILED:
Tejun Heo3a101d02010-06-08 21:40:36 +02007698 cpuset_update_active_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007699 return NOTIFY_OK;
Max Krasnyanskye761b772008-07-15 04:43:49 -07007700 default:
7701 return NOTIFY_DONE;
7702 }
7703}
Tejun Heo3a101d02010-06-08 21:40:36 +02007704
Tejun Heo0b2e9182010-06-21 23:53:31 +02007705static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7706 void *hcpu)
Tejun Heo3a101d02010-06-08 21:40:36 +02007707{
7708 switch (action & ~CPU_TASKS_FROZEN) {
7709 case CPU_DOWN_PREPARE:
7710 cpuset_update_active_cpus();
7711 return NOTIFY_OK;
7712 default:
7713 return NOTIFY_DONE;
7714 }
7715}
Max Krasnyanskye761b772008-07-15 04:43:49 -07007716
7717static int update_runtime(struct notifier_block *nfb,
7718 unsigned long action, void *hcpu)
7719{
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007720 int cpu = (int)(long)hcpu;
7721
Linus Torvalds1da177e2005-04-16 15:20:36 -07007722 switch (action) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007723 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007724 case CPU_DOWN_PREPARE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007725 disable_runtime(cpu_rq(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007726 return NOTIFY_OK;
7727
Linus Torvalds1da177e2005-04-16 15:20:36 -07007728 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007729 case CPU_DOWN_FAILED_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007730 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007731 case CPU_ONLINE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007732 enable_runtime(cpu_rq(cpu));
Max Krasnyanskye761b772008-07-15 04:43:49 -07007733 return NOTIFY_OK;
7734
Linus Torvalds1da177e2005-04-16 15:20:36 -07007735 default:
7736 return NOTIFY_DONE;
7737 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007738}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007739
7740void __init sched_init_smp(void)
7741{
Rusty Russelldcc30a32008-11-25 02:35:12 +10307742 cpumask_var_t non_isolated_cpus;
7743
7744 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
Yong Zhangcb5fd132009-09-14 20:20:16 +08007745 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
Nick Piggin5c1e1762006-10-03 01:14:04 -07007746
Mike Travis434d53b2008-04-04 18:11:04 -07007747#if defined(CONFIG_NUMA)
7748 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
7749 GFP_KERNEL);
7750 BUG_ON(sched_group_nodes_bycpu == NULL);
7751#endif
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007752 get_online_cpus();
Heiko Carstens712555e2008-04-28 11:33:07 +02007753 mutex_lock(&sched_domains_mutex);
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007754 arch_init_sched_domains(cpu_active_mask);
Rusty Russelldcc30a32008-11-25 02:35:12 +10307755 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7756 if (cpumask_empty(non_isolated_cpus))
7757 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
Heiko Carstens712555e2008-04-28 11:33:07 +02007758 mutex_unlock(&sched_domains_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007759 put_online_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007760
Tejun Heo3a101d02010-06-08 21:40:36 +02007761 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7762 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007763
7764 /* RT runtime code needs to handle some hotplug events */
7765 hotcpu_notifier(update_runtime, 0);
7766
Peter Zijlstrab328ca12008-04-29 10:02:46 +02007767 init_hrtick();
Nick Piggin5c1e1762006-10-03 01:14:04 -07007768
7769 /* Move init over to a non-isolated CPU */
Rusty Russelldcc30a32008-11-25 02:35:12 +10307770 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
Nick Piggin5c1e1762006-10-03 01:14:04 -07007771 BUG();
Ingo Molnar19978ca2007-11-09 22:39:38 +01007772 sched_init_granularity();
Rusty Russelldcc30a32008-11-25 02:35:12 +10307773 free_cpumask_var(non_isolated_cpus);
Rusty Russell42128232008-11-25 02:35:12 +10307774
Rusty Russell0e3900e2008-11-25 02:35:13 +10307775 init_sched_rt_class();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007776}
7777#else
7778void __init sched_init_smp(void)
7779{
Ingo Molnar19978ca2007-11-09 22:39:38 +01007780 sched_init_granularity();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007781}
7782#endif /* CONFIG_SMP */
7783
Arun R Bharadwajcd1bb942009-04-16 12:15:34 +05307784const_debug unsigned int sysctl_timer_migration = 1;
7785
Linus Torvalds1da177e2005-04-16 15:20:36 -07007786int in_sched_functions(unsigned long addr)
7787{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007788 return in_lock_functions(addr) ||
7789 (addr >= (unsigned long)__sched_text_start
7790 && addr < (unsigned long)__sched_text_end);
7791}
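/*
 * Sketch (illustrative only): functions annotated __sched land in the
 * .sched.text section, i.e. between __sched_text_start and
 * __sched_text_end, so in_sched_functions() returns non-zero for their
 * addresses and they are skipped e.g. when computing a task's wchan.
 */
#if 0
static void __sched example_sched_text(void)
{
	/* in_sched_functions() is true for any address in this function. */
	schedule();
}
#endif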
7792
Alexey Dobriyana9957442007-10-15 17:00:13 +02007793static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02007794{
7795 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02007796 INIT_LIST_HEAD(&cfs_rq->tasks);
Ingo Molnardd41f592007-07-09 18:51:59 +02007797#ifdef CONFIG_FAIR_GROUP_SCHED
7798 cfs_rq->rq = rq;
7799#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02007800 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
Ingo Molnardd41f592007-07-09 18:51:59 +02007801}
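/*
 * Note on the initial min_vruntime above: (u64)(-(1LL << 20)) is
 * 2^64 - 2^20, i.e. about one million units below the u64 wrap point,
 * so unsigned wraparound in the vruntime arithmetic is exercised soon
 * after boot instead of only after a very long uptime.
 */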
7802
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007803static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
7804{
7805 struct rt_prio_array *array;
7806 int i;
7807
7808 array = &rt_rq->active;
7809 for (i = 0; i < MAX_RT_PRIO; i++) {
7810 INIT_LIST_HEAD(array->queue + i);
7811 __clear_bit(i, array->bitmap);
7812 }
7813 /* delimiter for bitsearch: */
7814 __set_bit(MAX_RT_PRIO, array->bitmap);
7815
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007816#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -05007817 rt_rq->highest_prio.curr = MAX_RT_PRIO;
Gregory Haskins398a1532009-01-14 09:10:04 -05007818#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -05007819 rt_rq->highest_prio.next = MAX_RT_PRIO;
Peter Zijlstra48d5e252008-01-25 21:08:31 +01007820#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007821#endif
7822#ifdef CONFIG_SMP
7823 rt_rq->rt_nr_migratory = 0;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007824 rt_rq->overloaded = 0;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007825 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007826#endif
7827
7828 rt_rq->rt_time = 0;
7829 rt_rq->rt_throttled = 0;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007830 rt_rq->rt_runtime = 0;
Thomas Gleixner0986b112009-11-17 15:32:06 +01007831 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007832
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007833#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +01007834 rt_rq->rt_nr_boosted = 0;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007835 rt_rq->rq = rq;
7836#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007837}
7838
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007839#ifdef CONFIG_FAIR_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007840static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08007841 struct sched_entity *se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007842 struct sched_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007843{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007844 struct rq *rq = cpu_rq(cpu);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007845 tg->cfs_rq[cpu] = cfs_rq;
7846 init_cfs_rq(cfs_rq, rq);
7847 cfs_rq->tg = tg;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007848
7849 tg->se[cpu] = se;
Yong Zhang07e06b02011-01-07 15:17:36 +08007850 /* se could be NULL for root_task_group */
Dhaval Giani354d60c2008-04-19 19:44:59 +02007851 if (!se)
7852 return;
7853
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007854 if (!parent)
7855 se->cfs_rq = &rq->cfs;
7856 else
7857 se->cfs_rq = parent->my_q;
7858
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007859 se->my_q = cfs_rq;
Paul Turner94371782010-11-15 15:47:10 -08007860 update_load_set(&se->load, 0);
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007861 se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007862}
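/*
 * Resulting hierarchy: for root_task_group, se is NULL and its tasks sit
 * directly on rq->cfs; a first-level group (whose parent entity is NULL)
 * enqueues its entity on rq->cfs as well, while deeper groups enqueue on
 * their parent's my_q.
 */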
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007863#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007864
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007865#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007866static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08007867 struct sched_rt_entity *rt_se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007868 struct sched_rt_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007869{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007870 struct rq *rq = cpu_rq(cpu);
7871
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007872 tg->rt_rq[cpu] = rt_rq;
7873 init_rt_rq(rt_rq, rq);
7874 rt_rq->tg = tg;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007875 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007876
7877 tg->rt_se[cpu] = rt_se;
Dhaval Giani354d60c2008-04-19 19:44:59 +02007878 if (!rt_se)
7879 return;
7880
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007881 if (!parent)
7882 rt_se->rt_rq = &rq->rt;
7883 else
7884 rt_se->rt_rq = parent->my_q;
7885
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007886 rt_se->my_q = rt_rq;
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007887 rt_se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007888 INIT_LIST_HEAD(&rt_se->run_list);
7889}
7890#endif
7891
Linus Torvalds1da177e2005-04-16 15:20:36 -07007892void __init sched_init(void)
7893{
Ingo Molnardd41f592007-07-09 18:51:59 +02007894 int i, j;
Mike Travis434d53b2008-04-04 18:11:04 -07007895 unsigned long alloc_size = 0, ptr;
7896
7897#ifdef CONFIG_FAIR_GROUP_SCHED
7898 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7899#endif
7900#ifdef CONFIG_RT_GROUP_SCHED
7901 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7902#endif
Rusty Russelldf7c8e82009-03-19 15:22:20 +10307903#ifdef CONFIG_CPUMASK_OFFSTACK
Rusty Russell8c083f02009-03-19 15:22:20 +10307904 alloc_size += num_possible_cpus() * cpumask_size();
Rusty Russelldf7c8e82009-03-19 15:22:20 +10307905#endif
Mike Travis434d53b2008-04-04 18:11:04 -07007906 if (alloc_size) {
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03007907 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
Mike Travis434d53b2008-04-04 18:11:04 -07007908
7909#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08007910 root_task_group.se = (struct sched_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07007911 ptr += nr_cpu_ids * sizeof(void **);
7912
Yong Zhang07e06b02011-01-07 15:17:36 +08007913 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07007914 ptr += nr_cpu_ids * sizeof(void **);
Peter Zijlstraeff766a2008-04-19 19:45:00 +02007915
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007916#endif /* CONFIG_FAIR_GROUP_SCHED */
Mike Travis434d53b2008-04-04 18:11:04 -07007917#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08007918 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07007919 ptr += nr_cpu_ids * sizeof(void **);
7920
Yong Zhang07e06b02011-01-07 15:17:36 +08007921 root_task_group.rt_rq = (struct rt_rq **)ptr;
Peter Zijlstraeff766a2008-04-19 19:45:00 +02007922 ptr += nr_cpu_ids * sizeof(void **);
7923
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007924#endif /* CONFIG_RT_GROUP_SCHED */
Rusty Russelldf7c8e82009-03-19 15:22:20 +10307925#ifdef CONFIG_CPUMASK_OFFSTACK
7926 for_each_possible_cpu(i) {
7927 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
7928 ptr += cpumask_size();
7929 }
7930#endif /* CONFIG_CPUMASK_OFFSTACK */
Mike Travis434d53b2008-04-04 18:11:04 -07007931 }
Ingo Molnardd41f592007-07-09 18:51:59 +02007932
Gregory Haskins57d885f2008-01-25 21:08:18 +01007933#ifdef CONFIG_SMP
7934 init_defrootdomain();
7935#endif
7936
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007937 init_rt_bandwidth(&def_rt_bandwidth,
7938 global_rt_period(), global_rt_runtime());
7939
7940#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08007941 init_rt_bandwidth(&root_task_group.rt_bandwidth,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007942 global_rt_period(), global_rt_runtime());
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007943#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007944
Dhaval Giani7c941432010-01-20 13:26:18 +01007945#ifdef CONFIG_CGROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08007946 list_add(&root_task_group.list, &task_groups);
7947 INIT_LIST_HEAD(&root_task_group.children);
Mike Galbraith5091faa2010-11-30 14:18:03 +01007948 autogroup_init(&init_task);
Dhaval Giani7c941432010-01-20 13:26:18 +01007949#endif /* CONFIG_CGROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007950
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08007951 for_each_possible_cpu(i) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07007952 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007953
7954 rq = cpu_rq(i);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007955 raw_spin_lock_init(&rq->lock);
Nick Piggin78979862005-06-25 14:57:13 -07007956 rq->nr_running = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02007957 rq->calc_load_active = 0;
7958 rq->calc_load_update = jiffies + LOAD_FREQ;
Ingo Molnardd41f592007-07-09 18:51:59 +02007959 init_cfs_rq(&rq->cfs, rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01007960 init_rt_rq(&rq->rt, rq);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007961#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08007962 root_task_group.shares = root_task_group_load;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007963 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
Dhaval Giani354d60c2008-04-19 19:44:59 +02007964 /*
Yong Zhang07e06b02011-01-07 15:17:36 +08007965 * How much cpu bandwidth does root_task_group get?
Dhaval Giani354d60c2008-04-19 19:44:59 +02007966 *
7967	 * In case of task-groups formed through the cgroup filesystem, it
7968 * gets 100% of the cpu resources in the system. This overall
7969 * system cpu resource is divided among the tasks of
Yong Zhang07e06b02011-01-07 15:17:36 +08007970 * root_task_group and its child task-groups in a fair manner,
Dhaval Giani354d60c2008-04-19 19:44:59 +02007971 * based on each entity's (task or task-group's) weight
7972 * (se->load.weight).
7973 *
Yong Zhang07e06b02011-01-07 15:17:36 +08007974 * In other words, if root_task_group has 10 tasks of weight
Dhaval Giani354d60c2008-04-19 19:44:59 +02007975 * 1024 and two child groups A0 and A1 (of weight 1024 each),
7976 * then A0's share of the cpu resource is:
7977 *
Ingo Molnar0d905bc2009-05-04 19:13:30 +02007978 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
Dhaval Giani354d60c2008-04-19 19:44:59 +02007979 *
Yong Zhang07e06b02011-01-07 15:17:36 +08007980 * We achieve this by letting root_task_group's tasks sit
7981	 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
Dhaval Giani354d60c2008-04-19 19:44:59 +02007982 */
Yong Zhang07e06b02011-01-07 15:17:36 +08007983 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
Dhaval Giani354d60c2008-04-19 19:44:59 +02007984#endif /* CONFIG_FAIR_GROUP_SCHED */
7985
7986 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007987#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007988 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
Yong Zhang07e06b02011-01-07 15:17:36 +08007989 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007990#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07007991
Ingo Molnardd41f592007-07-09 18:51:59 +02007992 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7993 rq->cpu_load[j] = 0;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07007994
7995 rq->last_load_update_tick = jiffies;
7996
Linus Torvalds1da177e2005-04-16 15:20:36 -07007997#ifdef CONFIG_SMP
Nick Piggin41c7ce92005-06-25 14:57:24 -07007998 rq->sd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01007999 rq->rd = NULL;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02008000 rq->cpu_power = SCHED_LOAD_SCALE;
Gregory Haskins3f029d32009-07-29 11:08:47 -04008001 rq->post_schedule = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008002 rq->active_balance = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02008003 rq->next_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008004 rq->push_cpu = 0;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07008005 rq->cpu = i;
Gregory Haskins1f11eb62008-06-04 15:04:05 -04008006 rq->online = 0;
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01008007 rq->idle_stamp = 0;
8008 rq->avg_idle = 2*sysctl_sched_migration_cost;
Gregory Haskinsdc938522008-01-25 21:08:26 +01008009 rq_attach_root(rq, &def_root_domain);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008010#ifdef CONFIG_NO_HZ
8011 rq->nohz_balance_kick = 0;
8012 init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
8013#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008014#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01008015 init_rq_hrtick(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008016 atomic_set(&rq->nr_iowait, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008017 }
8018
Peter Williams2dd73a42006-06-27 02:54:34 -07008019 set_load_weight(&init_task);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008020
Avi Kivitye107be32007-07-26 13:40:43 +02008021#ifdef CONFIG_PREEMPT_NOTIFIERS
8022 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
8023#endif
8024
Christoph Lameterc9819f42006-12-10 02:20:25 -08008025#ifdef CONFIG_SMP
Carlos R. Mafra962cf362008-05-15 11:15:37 -03008026 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
Christoph Lameterc9819f42006-12-10 02:20:25 -08008027#endif
8028
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008029#ifdef CONFIG_RT_MUTEXES
Thomas Gleixner1d615482009-11-17 14:54:03 +01008030 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008031#endif
8032
Linus Torvalds1da177e2005-04-16 15:20:36 -07008033 /*
8034 * The boot idle thread does lazy MMU switching as well:
8035 */
8036 atomic_inc(&init_mm.mm_count);
8037 enter_lazy_tlb(&init_mm, current);
8038
8039 /*
8040 * Make us the idle thread. Technically, schedule() should not be
8041	 * called from this thread; however, somewhere below it might be.
8042	 * But because we are the idle thread, we just pick up running again
8043 * when this runqueue becomes "idle".
8044 */
8045 init_idle(current, smp_processor_id());
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02008046
8047 calc_load_update = jiffies + LOAD_FREQ;
8048
Ingo Molnardd41f592007-07-09 18:51:59 +02008049 /*
8050 * During early bootup we pretend to be a normal task:
8051 */
8052 current->sched_class = &fair_sched_class;
Ingo Molnar6892b752008-02-13 14:02:36 +01008053
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308054 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
Rusty Russell49557e62009-11-02 20:37:20 +10308055 zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308056#ifdef CONFIG_SMP
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308057#ifdef CONFIG_NO_HZ
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008058 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8059 alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
8060 atomic_set(&nohz.load_balancer, nr_cpu_ids);
8061 atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
8062 atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308063#endif
Rusty Russellbdddd292009-12-02 14:09:16 +10308064 /* May be allocated at isolcpus cmdline parse time */
8065 if (cpu_isolated_map == NULL)
8066 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308067#endif /* SMP */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308068
Ingo Molnar6892b752008-02-13 14:02:36 +01008069 scheduler_running = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008070}
8071
8072#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008073static inline int preempt_count_equals(int preempt_offset)
8074{
Frederic Weisbecker234da7b2009-12-16 20:21:05 +01008075 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008076
8077 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
8078}
8079
Simon Kagstromd8948372009-12-23 11:08:18 +01008080void __might_sleep(const char *file, int line, int preempt_offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008081{
Ingo Molnar48f24c42006-07-03 00:25:40 -07008082#ifdef in_atomic
Linus Torvalds1da177e2005-04-16 15:20:36 -07008083 static unsigned long prev_jiffy; /* ratelimiting */
8084
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008085 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
8086 system_state != SYSTEM_RUNNING || oops_in_progress)
Ingo Molnaraef745f2008-08-28 11:34:43 +02008087 return;
8088 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8089 return;
8090 prev_jiffy = jiffies;
8091
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01008092 printk(KERN_ERR
8093 "BUG: sleeping function called from invalid context at %s:%d\n",
8094 file, line);
8095 printk(KERN_ERR
8096 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8097 in_atomic(), irqs_disabled(),
8098 current->pid, current->comm);
Ingo Molnaraef745f2008-08-28 11:34:43 +02008099
8100 debug_show_held_locks(current);
8101 if (irqs_disabled())
8102 print_irqtrace_events(current);
8103 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008104#endif
8105}
8106EXPORT_SYMBOL(__might_sleep);
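/*
 * Sketch (illustrative only) of what the check above catches: calling
 * might_sleep() while atomic triggers the "BUG: sleeping function called
 * from invalid context" report, assuming a kernel where spin_lock()
 * raises the preempt count.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);

static void example_bad_sleep(void)
{
	spin_lock(&example_lock);
	might_sleep();		/* in_atomic() is non-zero: complains here */
	spin_unlock(&example_lock);
}
#endif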
8107#endif
8108
8109#ifdef CONFIG_MAGIC_SYSRQ
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008110static void normalize_task(struct rq *rq, struct task_struct *p)
8111{
8112 int on_rq;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02008113
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008114 on_rq = p->se.on_rq;
8115 if (on_rq)
8116 deactivate_task(rq, p, 0);
8117 __setscheduler(rq, p, SCHED_NORMAL, 0);
8118 if (on_rq) {
8119 activate_task(rq, p, 0);
8120 resched_task(rq->curr);
8121 }
8122}
8123
Linus Torvalds1da177e2005-04-16 15:20:36 -07008124void normalize_rt_tasks(void)
8125{
Ingo Molnara0f98a12007-06-17 18:37:45 +02008126 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008127 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07008128 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008129
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008130 read_lock_irqsave(&tasklist_lock, flags);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008131 do_each_thread(g, p) {
Ingo Molnar178be792007-10-15 17:00:18 +02008132 /*
8133 * Only normalize user tasks:
8134 */
8135 if (!p->mm)
8136 continue;
8137
Ingo Molnardd41f592007-07-09 18:51:59 +02008138 p->se.exec_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008139#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03008140 p->se.statistics.wait_start = 0;
8141 p->se.statistics.sleep_start = 0;
8142 p->se.statistics.block_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008143#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02008144
8145 if (!rt_task(p)) {
8146 /*
8147 * Renice negative nice level userspace
8148 * tasks back to 0:
8149 */
8150 if (TASK_NICE(p) < 0 && p->mm)
8151 set_user_nice(p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008152 continue;
Ingo Molnardd41f592007-07-09 18:51:59 +02008153 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008154
Thomas Gleixner1d615482009-11-17 14:54:03 +01008155 raw_spin_lock(&p->pi_lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -07008156 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008157
Ingo Molnar178be792007-10-15 17:00:18 +02008158 normalize_task(rq, p);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008159
Ingo Molnarb29739f2006-06-27 02:54:51 -07008160 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01008161 raw_spin_unlock(&p->pi_lock);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008162 } while_each_thread(g, p);
8163
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008164 read_unlock_irqrestore(&tasklist_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008165}
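/*
 * normalize_rt_tasks() is reachable via the SysRq 'n' key (see
 * sysrq_handle_unrt() in drivers/char/sysrq.c), e.g. with
 * "echo n > /proc/sysrq-trigger".
 */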
8166
8167#endif /* CONFIG_MAGIC_SYSRQ */
Linus Torvalds1df5c102005-09-12 07:59:21 -07008168
Jason Wessel67fc4e02010-05-20 21:04:21 -05008169#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008170/*
Jason Wessel67fc4e02010-05-20 21:04:21 -05008171 * These functions are only useful for the IA64 MCA handling, or kdb.
Linus Torvalds1df5c102005-09-12 07:59:21 -07008172 *
8173 * They can only be called when the whole system has been
8174 * stopped - every CPU needs to be quiescent, and no scheduling
8175 * activity can take place. Using them for anything else would
8176 * be a serious bug, and as a result, they aren't even visible
8177 * under any other configuration.
8178 */
8179
8180/**
8181 * curr_task - return the current task for a given cpu.
8182 * @cpu: the processor in question.
8183 *
8184 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8185 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008186struct task_struct *curr_task(int cpu)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008187{
8188 return cpu_curr(cpu);
8189}
8190
Jason Wessel67fc4e02010-05-20 21:04:21 -05008191#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8192
8193#ifdef CONFIG_IA64
Linus Torvalds1df5c102005-09-12 07:59:21 -07008194/**
8195 * set_curr_task - set the current task for a given cpu.
8196 * @cpu: the processor in question.
8197 * @p: the task pointer to set.
8198 *
8199 * Description: This function must only be used when non-maskable interrupts
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008200 * are serviced on a separate stack. It allows the architecture to switch the
8201 * notion of the current task on a cpu in a non-blocking manner. This function
Linus Torvalds1df5c102005-09-12 07:59:21 -07008202 * must be called with all CPUs synchronized and interrupts disabled; the
8203 * caller must save the original value of the current task (see
8204 * curr_task() above) and restore that value before reenabling interrupts and
8205 * re-starting the system.
8206 *
8207 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8208 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008209void set_curr_task(int cpu, struct task_struct *p)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008210{
8211 cpu_curr(cpu) = p;
8212}
8213
8214#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008215
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008216#ifdef CONFIG_FAIR_GROUP_SCHED
8217static void free_fair_sched_group(struct task_group *tg)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008218{
8219 int i;
8220
8221 for_each_possible_cpu(i) {
8222 if (tg->cfs_rq)
8223 kfree(tg->cfs_rq[i]);
8224 if (tg->se)
8225 kfree(tg->se[i]);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008226 }
8227
8228 kfree(tg->cfs_rq);
8229 kfree(tg->se);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008230}
8231
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008232static
8233int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008234{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008235 struct cfs_rq *cfs_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008236 struct sched_entity *se;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008237 struct rq *rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008238 int i;
8239
Mike Travis434d53b2008-04-04 18:11:04 -07008240 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008241 if (!tg->cfs_rq)
8242 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008243 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008244 if (!tg->se)
8245 goto err;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008246
8247 tg->shares = NICE_0_LOAD;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008248
8249 for_each_possible_cpu(i) {
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008250 rq = cpu_rq(i);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008251
Li Zefaneab17222008-10-29 17:03:22 +08008252 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8253 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008254 if (!cfs_rq)
8255 goto err;
8256
Li Zefaneab17222008-10-29 17:03:22 +08008257 se = kzalloc_node(sizeof(struct sched_entity),
8258 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008259 if (!se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008260 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008261
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008262 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008263 }
8264
8265 return 1;
8266
Peter Zijlstra49246272010-10-17 21:46:10 +02008267err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008268 kfree(cfs_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008269err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008270 return 0;
8271}
8272
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008273static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8274{
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008275 struct rq *rq = cpu_rq(cpu);
8276 unsigned long flags;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008277
8278 /*
8279	 * Only empty task groups can be destroyed, so we can speculatively
8280 * check on_list without danger of it being re-added.
8281 */
8282 if (!tg->cfs_rq[cpu]->on_list)
8283 return;
8284
8285 raw_spin_lock_irqsave(&rq->lock, flags);
Paul Turner822bc182010-11-29 16:55:40 -08008286 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008287 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008288}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008289#else /* !CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008290static inline void free_fair_sched_group(struct task_group *tg)
8291{
8292}
8293
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008294static inline
8295int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008296{
8297 return 1;
8298}
8299
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008300static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8301{
8302}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008303#endif /* CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008304
8305#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008306static void free_rt_sched_group(struct task_group *tg)
8307{
8308 int i;
8309
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008310 destroy_rt_bandwidth(&tg->rt_bandwidth);
8311
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008312 for_each_possible_cpu(i) {
8313 if (tg->rt_rq)
8314 kfree(tg->rt_rq[i]);
8315 if (tg->rt_se)
8316 kfree(tg->rt_se[i]);
8317 }
8318
8319 kfree(tg->rt_rq);
8320 kfree(tg->rt_se);
8321}
8322
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008323static
8324int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008325{
8326 struct rt_rq *rt_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008327 struct sched_rt_entity *rt_se;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008328 struct rq *rq;
8329 int i;
8330
Mike Travis434d53b2008-04-04 18:11:04 -07008331 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008332 if (!tg->rt_rq)
8333 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008334 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008335 if (!tg->rt_se)
8336 goto err;
8337
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008338 init_rt_bandwidth(&tg->rt_bandwidth,
8339 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008340
8341 for_each_possible_cpu(i) {
8342 rq = cpu_rq(i);
8343
Li Zefaneab17222008-10-29 17:03:22 +08008344 rt_rq = kzalloc_node(sizeof(struct rt_rq),
8345 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008346 if (!rt_rq)
8347 goto err;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008348
Li Zefaneab17222008-10-29 17:03:22 +08008349 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8350 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008351 if (!rt_se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008352 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008353
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008354 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008355 }
8356
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008357 return 1;
8358
Peter Zijlstra49246272010-10-17 21:46:10 +02008359err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008360 kfree(rt_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008361err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008362 return 0;
8363}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008364#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008365static inline void free_rt_sched_group(struct task_group *tg)
8366{
8367}
8368
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008369static inline
8370int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008371{
8372 return 1;
8373}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008374#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008375
Dhaval Giani7c941432010-01-20 13:26:18 +01008376#ifdef CONFIG_CGROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008377static void free_sched_group(struct task_group *tg)
8378{
8379 free_fair_sched_group(tg);
8380 free_rt_sched_group(tg);
Mike Galbraithe9aa1dd2011-01-05 11:11:25 +01008381 autogroup_free(tg);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008382 kfree(tg);
8383}
8384
8385/* allocate runqueue etc for a new task group */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008386struct task_group *sched_create_group(struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008387{
8388 struct task_group *tg;
8389 unsigned long flags;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008390
8391 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8392 if (!tg)
8393 return ERR_PTR(-ENOMEM);
8394
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008395 if (!alloc_fair_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008396 goto err;
8397
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008398 if (!alloc_rt_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008399 goto err;
8400
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008401 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008402 list_add_rcu(&tg->list, &task_groups);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008403
8404 WARN_ON(!parent); /* root should already exist */
8405
8406 tg->parent = parent;
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008407 INIT_LIST_HEAD(&tg->children);
Zhang, Yanmin09f27242008-08-14 15:56:40 +08008408 list_add_rcu(&tg->siblings, &parent->children);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008409 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008410
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008411 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008412
8413err:
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008414 free_sched_group(tg);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008415 return ERR_PTR(-ENOMEM);
8416}
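/*
 * Usage sketch (illustrative only): the cgroup code creates groups as
 * children of an existing group; with CONFIG_FAIR_GROUP_SCHED the new
 * group's CFS weight can then be adjusted via sched_group_set_shares()
 * (defined below). "example_make_group" is hypothetical.
 */
#if 0
static struct task_group *example_make_group(struct task_group *parent)
{
	struct task_group *tg = sched_create_group(parent);

	if (!IS_ERR(tg))
		sched_group_set_shares(tg, 2 * NICE_0_LOAD);
	return tg;
}
#endif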
8417
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008418/* rcu callback to free various structures associated with a task group */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008419static void free_sched_group_rcu(struct rcu_head *rhp)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008420{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008421 /* now it should be safe to free those cfs_rqs */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008422 free_sched_group(container_of(rhp, struct task_group, rcu));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008423}
8424
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008425/* Destroy runqueue etc associated with a task group */
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008426void sched_destroy_group(struct task_group *tg)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008427{
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008428 unsigned long flags;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008429 int i;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008430
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008431 /* end participation in shares distribution */
8432 for_each_possible_cpu(i)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008433 unregister_fair_sched_group(tg, i);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008434
8435 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008436 list_del_rcu(&tg->list);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008437 list_del_rcu(&tg->siblings);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008438 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008439
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008440	/* wait for possible concurrent references to cfs_rqs to complete */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008441 call_rcu(&tg->rcu, free_sched_group_rcu);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008442}
8443
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008444/* Change a task's runqueue when it moves between groups.
Ingo Molnar3a252012007-10-15 17:00:12 +02008445 * The caller of this function should have put the task in its new group
8446 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8447 * reflect its new group.
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008448 */
8449void sched_move_task(struct task_struct *tsk)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008450{
8451 int on_rq, running;
8452 unsigned long flags;
8453 struct rq *rq;
8454
8455 rq = task_rq_lock(tsk, &flags);
8456
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01008457 running = task_current(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008458 on_rq = tsk->se.on_rq;
8459
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008460 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008461 dequeue_task(rq, tsk, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008462 if (unlikely(running))
8463 tsk->sched_class->put_prev_task(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008464
Peter Zijlstra810b3812008-02-29 15:21:01 -05008465#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008466 if (tsk->sched_class->task_move_group)
8467 tsk->sched_class->task_move_group(tsk, on_rq);
8468 else
Peter Zijlstra810b3812008-02-29 15:21:01 -05008469#endif
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008470 set_task_rq(tsk, task_cpu(tsk));
Peter Zijlstra810b3812008-02-29 15:21:01 -05008471
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008472 if (unlikely(running))
8473 tsk->sched_class->set_curr_task(rq);
8474 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01008475 enqueue_task(rq, tsk, 0);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008476
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008477 task_rq_unlock(rq, &flags);
8478}
#endif /* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_FAIR_GROUP_SCHED
static DEFINE_MUTEX(shares_mutex);

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	else if (shares > MAX_SHARES)
		shares = MAX_SHARES;

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se;

		se = tg->se[i];
		/* Propagate contribution to hierarchy */
		raw_spin_lock_irqsave(&rq->lock, flags);
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se), 0);
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

done:
	mutex_unlock(&shares_mutex);
	return 0;
}
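
/*
 * Shares are a relative weight, silently clamped to [MIN_SHARES,
 * MAX_SHARES] above: under full load a group set to 2048 should receive
 * roughly twice the CPU time of a sibling left at the default of 1024
 * (illustrative numbers; the default is NICE_0_LOAD).
 */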

unsigned long sched_group_shares(struct task_group *tg)
{
	return tg->shares;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real-time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

static unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;

	return div64_u64(runtime << 20, period);
}
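
/*
 * to_ratio() expresses runtime/period as a 20-bit fixed-point fraction,
 * with 1ULL << 20 meaning 100%.  With the default sysctls (period
 * 1000000us, runtime 950000us) it returns 950000 << 20 / 1000000,
 * i.e. 996147, about 95% of 1 << 20.  The units cancel, so callers may
 * pass microseconds or nanoseconds as long as they are consistent.
 */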

/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *g, *p;

	do_each_thread(g, p) {
		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
			return 1;
	} while_each_thread(g, p);

	return 0;
}

struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};

static int tg_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks.
	 */
	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	return walk_tg_tree(tg_schedulable, tg_nop, &data);
}
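
/*
 * walk_tg_tree() applies tg_schedulable() to every group in the
 * hierarchy (tg_nop() is the no-op "up" callback), substituting the
 * proposed (period, runtime) pair wherever d->tg appears.  A single
 * walk therefore validates the change against the global limit and
 * against every parent/child budget at once.
 */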

static int tg_set_bandwidth(struct task_group *tg,
		u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return err;
}
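
/*
 * Lock ordering above: rt_constraints_mutex serializes concurrent
 * bandwidth updates, the tasklist_lock read side keeps the task list
 * stable while tg_has_rt_tasks() runs during validation, and
 * rt_runtime_lock publishes the accepted values to every per-cpu
 * rt_rq.
 */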

int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;

	return tg_set_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
	u64 rt_runtime, rt_period;

	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	if (rt_period == 0)
		return -EINVAL;

	return tg_set_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}
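
/*
 * The pair is interpreted per period: rt_period_us = 1000000 with
 * rt_runtime_us = 500000, for example, lets the group's RT tasks run
 * for at most half of every second.  Writing rt_runtime_us = -1 maps
 * to RUNTIME_INF, which the schedulability check only admits when the
 * global limit itself allows it.
 */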

static int sched_rt_global_constraints(void)
{
	u64 runtime, period;
	int ret = 0;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	runtime = global_rt_runtime();
	period = global_rt_period();

	/*
	 * Sanity check on the sysctl variables.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	ret = __rt_schedulable(NULL, 0, 0);
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}

int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}

#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	/*
	 * There are always some RT tasks in the root group
	 * -- migration, kstopmachine etc.
	 */
	if (sysctl_sched_rt_runtime == 0)
		return -EBUSY;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */

int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_constraints();
		if (ret) {
			sysctl_sched_rt_period = old_period;
			sysctl_sched_rt_runtime = old_runtime;
		} else {
			def_rt_bandwidth.rt_runtime = global_rt_runtime();
			def_rt_bandwidth.rt_period =
				ns_to_ktime(global_rt_period());
		}
	}
	mutex_unlock(&mutex);

	return ret;
}
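
/*
 * sched_rt_handler() backs writes to the sched_rt_period_us and
 * sched_rt_runtime_us sysctls: the old values are saved, the write is
 * applied, and if sched_rt_global_constraints() then rejects the new
 * settings they are rolled back wholesale, so userspace never sees a
 * half-applied configuration.
 */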

#ifdef CONFIG_CGROUP_SCHED

/* return corresponding task_group object of a cgroup */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
			    struct task_group, css);
}

static struct cgroup_subsys_state *
cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_group *tg, *parent;

	if (!cgrp->parent) {
		/* This is early initialization for the top cgroup */
		return &root_task_group.css;
	}

	parent = cgroup_tg(cgrp->parent);
	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

static void
cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_group *tg = cgroup_tg(cgrp);

	sched_destroy_group(tg);
}

static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
		return -EINVAL;
#else
	/* We don't support RT-tasks being in separate groups */
	if (tsk->sched_class != &fair_sched_class)
		return -EINVAL;
#endif
	return 0;
}

static int
cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
		      struct task_struct *tsk, bool threadgroup)
{
	int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
	if (retval)
		return retval;
	if (threadgroup) {
		struct task_struct *c;
		rcu_read_lock();
		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
			retval = cpu_cgroup_can_attach_task(cgrp, c);
			if (retval) {
				rcu_read_unlock();
				return retval;
			}
		}
		rcu_read_unlock();
	}
	return 0;
}

static void
cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
		  struct cgroup *old_cont, struct task_struct *tsk,
		  bool threadgroup)
{
	sched_move_task(tsk);
	if (threadgroup) {
		struct task_struct *c;
		rcu_read_lock();
		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
			sched_move_task(c);
		}
		rcu_read_unlock();
	}
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
				u64 shareval)
{
	return sched_group_set_shares(cgroup_tg(cgrp), shareval);
}

static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct task_group *tg = cgroup_tg(cgrp);

	return (u64) tg->shares;
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
				s64 val)
{
	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
}

static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_runtime(cgroup_tg(cgrp));
}

static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
		u64 rt_period_us)
{
	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_period(cgroup_tg(cgrp));
}
#endif /* CONFIG_RT_GROUP_SCHED */

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
};

static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
}

struct cgroup_subsys cpu_cgroup_subsys = {
	.name		= "cpu",
	.create		= cpu_cgroup_create,
	.destroy	= cpu_cgroup_destroy,
	.can_attach	= cpu_cgroup_can_attach,
	.attach		= cpu_cgroup_attach,
	.populate	= cpu_cgroup_populate,
	.subsys_id	= cpu_cgroup_subsys_id,
	.early_init	= 1,
};
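
/*
 * Sketch of typical usage from userspace; the mount point is an
 * assumption, only the cpu.* file names come from cpu_files[] above:
 *
 *   mount -t cgroup -o cpu none /cgroup
 *   mkdir /cgroup/mygroup
 *   echo 2048 > /cgroup/mygroup/cpu.shares
 *   echo $$ > /cgroup/mygroup/tasks
 */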

#endif	/* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_CGROUP_CPUACCT

/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */

/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
	struct cpuacct *parent;
};

struct cgroup_subsys cpuacct_subsys;

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_create(
	struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	int i;

	if (!ca)
		goto out;

	ca->cpuusage = alloc_percpu(u64);
	if (!ca->cpuusage)
		goto out_free_ca;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		if (percpu_counter_init(&ca->cpustat[i], 0))
			goto out_free_counters;

	if (cgrp->parent)
		ca->parent = cgroup_ca(cgrp->parent);

	return &ca->css;

out_free_counters:
	while (--i >= 0)
		percpu_counter_destroy(&ca->cpustat[i]);
	free_percpu(ca->cpuusage);
out_free_ca:
	kfree(ca);
out:
	return ERR_PTR(-ENOMEM);
}

/* destroy an existing cpu accounting group */
static void
cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int i;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		percpu_counter_destroy(&ca->cpustat[i]);
	free_percpu(ca->cpuusage);
	kfree(ca);
}

static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	u64 data;

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	data = *cpuusage;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	data = *cpuusage;
#endif

	return data;
}

static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	*cpuusage = val;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	*cpuusage = val;
#endif
}

/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	u64 totalcpuusage = 0;
	int i;

	for_each_present_cpu(i)
		totalcpuusage += cpuacct_cpuusage_read(ca, i);

	return totalcpuusage;
}

static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
			  u64 reset)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int err = 0;
	int i;

	/* only a write of 0 (a reset) is supported */
	if (reset) {
		err = -EINVAL;
		goto out;
	}

	for_each_present_cpu(i)
		cpuacct_cpuusage_write(ca, i, 0);

out:
	return err;
}

static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
				   struct seq_file *m)
{
	struct cpuacct *ca = cgroup_ca(cgroup);
	u64 percpu;
	int i;

	for_each_present_cpu(i) {
		percpu = cpuacct_cpuusage_read(ca, i);
		seq_printf(m, "%llu ", (unsigned long long) percpu);
	}
	seq_printf(m, "\n");
	return 0;
}

static const char *cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER] = "user",
	[CPUACCT_STAT_SYSTEM] = "system",
};

static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
		struct cgroup_map_cb *cb)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int i;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
		s64 val = percpu_counter_read(&ca->cpustat[i]);
		val = cputime64_to_clock_t(val);
		cb->fill(cb, cpuacct_stat_desc[i], val);
	}
	return 0;
}
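
/*
 * Read through the map callback above, cpuacct.stat comes out as two
 * "key value" lines in USER_HZ clock ticks, e.g. (illustrative
 * numbers):
 *
 *   user 4225
 *   system 1171
 */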

static struct cftype files[] = {
	{
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		.name = "usage_percpu",
		.read_seq_string = cpuacct_percpu_seq_read,
	},
	{
		.name = "stat",
		.read_map = cpuacct_stats_show,
	},
};

static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
}

/*
 * charge this task's execution time to its accounting group.
 *
 * called with rq->lock held.
 */
static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
	struct cpuacct *ca;
	int cpu;

	if (unlikely(!cpuacct_subsys.active))
		return;

	cpu = task_cpu(tsk);

	rcu_read_lock();

	ca = task_ca(tsk);

	for (; ca; ca = ca->parent) {
		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
		*cpuusage += cputime;
	}

	rcu_read_unlock();
}
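
/*
 * The walk above follows ca->parent up to the root, so a task in group
 * /a/b contributes its cputime to b's usage, to a's, and to the root
 * group's alike, using plain per-cpu adds (the caller already holds
 * rq->lock, so no further locking is needed).
 */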

/*
 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
 * in cputime_t units. As a result, cpuacct_update_stats calls
 * percpu_counter_add with values large enough to always overflow the
 * per cpu batch limit causing bad SMP scalability.
 *
 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
 */
#ifdef CONFIG_SMP
#define CPUACCT_BATCH	\
	min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
#else
#define CPUACCT_BATCH	0
#endif

/*
 * Charge the system/user time to the task's accounting group.
 */
static void cpuacct_update_stats(struct task_struct *tsk,
		enum cpuacct_stat_index idx, cputime_t val)
{
	struct cpuacct *ca;
	int batch = CPUACCT_BATCH;

	if (unlikely(!cpuacct_subsys.active))
		return;

	rcu_read_lock();
	ca = task_ca(tsk);

	do {
		__percpu_counter_add(&ca->cpustat[idx], val, batch);
		ca = ca->parent;
	} while (ca);
	rcu_read_unlock();
}

struct cgroup_subsys cpuacct_subsys = {
	.name		= "cpuacct",
	.create		= cpuacct_create,
	.destroy	= cpuacct_destroy,
	.populate	= cpuacct_populate,
	.subsys_id	= cpuacct_subsys_id,
};
#endif	/* CONFIG_CGROUP_CPUACCT */